nvme: introduce nvme_is_aen_req function
This function improves code readability and reduces code duplication.

Signed-off-by: Israel Rukshin <israelr@mellanox.com>
Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 58a8df67e0
parent bcde5f0fc7
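For context: AEN (Asynchronous Event Notification) requests are only ever issued on the admin queue (qid 0), and their command IDs come from the slots reserved above the admin queue's blk-mq tag space, i.e. IDs >= NVME_AQ_BLK_MQ_DEPTH. The new helper encodes exactly that two-part check. Below is a minimal standalone sketch of the predicate with a few self-checks; the constant values restate include/linux/nvme.h as of this commit (NVME_AQ_DEPTH = 32, one command ID reserved for AENs) and the userspace fixed-width types stand in for the kernel's u16/__u16, only so the example compiles outside the kernel.

	/*
	 * Minimal userspace sketch of the helper's logic, for illustration
	 * only. The kernel helper itself lives in drivers/nvme/host/nvme.h
	 * and uses the kernel's own types and constants.
	 */
	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define NVME_AQ_DEPTH		32
	#define NVME_NR_AEN_COMMANDS	1
	#define NVME_AQ_BLK_MQ_DEPTH	(NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)

	/* Same predicate as the helper added by this patch. */
	static inline bool nvme_is_aen_req(uint16_t qid, uint16_t command_id)
	{
		return !qid && command_id >= NVME_AQ_BLK_MQ_DEPTH;
	}

	int main(void)
	{
		/* Admin queue (qid 0), command ID in the reserved range: AEN. */
		assert(nvme_is_aen_req(0, NVME_AQ_BLK_MQ_DEPTH));
		/* Admin queue, but an ordinary blk-mq tag: a regular command. */
		assert(!nvme_is_aen_req(0, NVME_AQ_BLK_MQ_DEPTH - 1));
		/* AENs are never delivered on I/O queues (qid != 0). */
		assert(!nvme_is_aen_req(1, NVME_AQ_BLK_MQ_DEPTH));
		puts("nvme_is_aen_req sketch: all checks passed");
		return 0;
	}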
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -445,6 +445,11 @@ static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
 	put_device(ctrl->device);
 }
 
+static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
+{
+	return !qid && command_id >= NVME_AQ_BLK_MQ_DEPTH;
+}
+
 void nvme_complete_rq(struct request *req);
 bool nvme_cancel_request(struct request *req, void *data, bool reserved);
 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -967,8 +967,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 	 * aborts. We don't even bother to allocate a struct request
 	 * for them but rather special case them here.
 	 */
-	if (unlikely(nvmeq->qid == 0 &&
-			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
+	if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
 		nvme_complete_async_event(&nvmeq->dev->ctrl,
 				cqe->status, &cqe->result);
 		return;
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1501,8 +1501,8 @@ static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 	 * aborts. We don't even bother to allocate a struct request
 	 * for them but rather special case them here.
 	 */
-	if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
-			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
+	if (unlikely(nvme_is_aen_req(nvme_rdma_queue_idx(queue),
+				     cqe->command_id)))
 		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
 				&cqe->result);
 	else
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -491,8 +491,8 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
 	 * aborts. We don't even bother to allocate a struct request
 	 * for them but rather special case them here.
 	 */
-	if (unlikely(nvme_tcp_queue_id(queue) == 0 &&
-			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
+	if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
+				     cqe->command_id)))
 		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
 				&cqe->result);
 	else
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -102,8 +102,8 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
 	 * aborts. We don't even bother to allocate a struct request
 	 * for them but rather special case them here.
 	 */
-	if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
-			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
+	if (unlikely(nvme_is_aen_req(nvme_loop_queue_idx(queue),
+				     cqe->command_id))) {
 		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
 				&cqe->result);
 	} else {
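With this change, the PCIe, RDMA, and TCP transports and the loopback target all classify AEN completions through the same one-line predicate, so the convention that AEN command IDs sit at and above NVME_AQ_BLK_MQ_DEPTH on the admin queue is stated in a single place (drivers/nvme/host/nvme.h) rather than open-coded four times.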