nvme-rdma: factor out a nvme_rdma_end_request helper
Factor a small snippet of duplicated code into a new helper in preparation for making this snippet a little bit less trivial. Reviewed-by: Daniel Wagner <dwagner@suse.de> Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
40d09b53bf
commit
8446546cc2
|
@ -1149,6 +1149,15 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
|
|||
queue_work(nvme_reset_wq, &ctrl->err_work);
|
||||
}
|
||||
|
||||
static void nvme_rdma_end_request(struct nvme_rdma_request *req)
|
||||
{
|
||||
struct request *rq = blk_mq_rq_from_pdu(req);
|
||||
|
||||
if (!refcount_dec_and_test(&req->ref))
|
||||
return;
|
||||
nvme_end_request(rq, req->status, req->result);
|
||||
}
|
||||
|
||||
static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
|
||||
const char *op)
|
||||
{
|
||||
|
@ -1173,16 +1182,11 @@ static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
|
|||
{
|
||||
struct nvme_rdma_request *req =
|
||||
container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe);
|
||||
struct request *rq = blk_mq_rq_from_pdu(req);
|
||||
|
||||
if (unlikely(wc->status != IB_WC_SUCCESS)) {
|
||||
if (unlikely(wc->status != IB_WC_SUCCESS))
|
||||
nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
|
||||
return;
|
||||
}
|
||||
|
||||
if (refcount_dec_and_test(&req->ref))
|
||||
nvme_end_request(rq, req->status, req->result);
|
||||
|
||||
else
|
||||
nvme_rdma_end_request(req);
|
||||
}
|
||||
|
||||
static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
|
||||
|
@ -1547,15 +1551,11 @@ static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
|
|||
container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
|
||||
struct nvme_rdma_request *req =
|
||||
container_of(qe, struct nvme_rdma_request, sqe);
|
||||
struct request *rq = blk_mq_rq_from_pdu(req);
|
||||
|
||||
if (unlikely(wc->status != IB_WC_SUCCESS)) {
|
||||
if (unlikely(wc->status != IB_WC_SUCCESS))
|
||||
nvme_rdma_wr_error(cq, wc, "SEND");
|
||||
return;
|
||||
}
|
||||
|
||||
if (refcount_dec_and_test(&req->ref))
|
||||
nvme_end_request(rq, req->status, req->result);
|
||||
else
|
||||
nvme_rdma_end_request(req);
|
||||
}
|
||||
|
||||
static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
|
||||
|
@ -1694,11 +1694,9 @@ static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
|
|||
nvme_rdma_error_recovery(queue->ctrl);
|
||||
}
|
||||
/* the local invalidation completion will end the request */
|
||||
return;
|
||||
} else {
|
||||
nvme_rdma_end_request(req);
|
||||
}
|
||||
|
||||
if (refcount_dec_and_test(&req->ref))
|
||||
nvme_end_request(rq, req->status, req->result);
|
||||
}
|
||||
|
||||
static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
|
||||
|
|
Loading…
Reference in New Issue