nvme: factor request completion code into a common helper
This avoids duplicating the logic four times, and it also allows keeping some helpers static in core.c or open-coding them. Note that this loses printing the aborted status on completions in the PCI driver, as that relies on a data structure that is no longer available.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit 77f02a7acd
parent 4bca70d067
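Before the diff itself, a minimal userspace sketch of the decision flow that the new common helper centralizes may help orientation: retry the request when the status allows it, otherwise translate the status into an error code (or pass it through raw for passthrough commands) and end the request. Everything in the sketch is an illustrative stand-in, not kernel code: fake_request, complete_rq, and the SC_* constants are made up for the example, and the elapsed-time check against the request timeout that the real nvme_req_needs_retry() performs is omitted.

/*
 * Standalone model of the decision flow that nvme_complete_rq() centralizes.
 * All names and values are illustrative stand-ins, not kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

#define SC_SUCCESS       0x000
#define SC_CAP_EXCEEDED  0x081
#define SC_DNR           0x4000   /* "do not retry" bit, standing in for NVME_SC_DNR */

struct fake_request {
	unsigned short	errors;      /* completion status word */
	unsigned int	retries;
	unsigned int	max_retries;
	bool		passthrough; /* raw command issued from user space? */
};

/* Model of nvme_error_status(): translate an NVMe status into an errno. */
static int error_status(unsigned short status)
{
	switch (status & 0x7ff) {
	case SC_SUCCESS:
		return 0;
	case SC_CAP_EXCEEDED:
		return -ENOSPC;
	default:
		return -EIO;
	}
}

/* Model of nvme_req_needs_retry(): retry unless DNR is set or retries ran out. */
static bool needs_retry(const struct fake_request *req)
{
	return !(req->errors & SC_DNR) && req->retries < req->max_retries;
}

/* Model of nvme_complete_rq(): requeue for retry, or finish with an error. */
static void complete_rq(struct fake_request *req)
{
	int error = 0;

	if (req->errors) {
		if (needs_retry(req)) {
			req->retries++;
			printf("requeue (retry %u)\n", req->retries);
			return;
		}
		/* Passthrough commands report the raw status to the caller. */
		error = req->passthrough ? req->errors : error_status(req->errors);
	}
	printf("end request, error=%d\n", error);
}

int main(void)
{
	struct fake_request ok   = { .errors = SC_SUCCESS,        .max_retries = 5 };
	struct fake_request soft = { .errors = 0x002,             .max_retries = 5 };
	struct fake_request hard = { .errors = 0x002 | SC_DNR,    .max_retries = 5 };

	complete_rq(&ok);    /* -> end request, error=0 */
	complete_rq(&soft);  /* -> requeue (retry 1) */
	complete_rq(&hard);  /* -> end request, error=-5 (EIO) */
	return 0;
}

With that flow in one place, each transport's blk_mq .complete handler in the hunks below shrinks to its transport-specific teardown followed by a call to nvme_complete_rq().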
@@ -67,6 +67,35 @@ static DEFINE_SPINLOCK(dev_list_lock);
 
 static struct class *nvme_class;
 
+static inline bool nvme_req_needs_retry(struct request *req, u16 status)
+{
+	return !(status & NVME_SC_DNR || blk_noretry_request(req)) &&
+		(jiffies - req->start_time) < req->timeout &&
+		req->retries < nvme_max_retries;
+}
+
+void nvme_complete_rq(struct request *req)
+{
+	int error = 0;
+
+	if (unlikely(req->errors)) {
+		if (nvme_req_needs_retry(req, req->errors)) {
+			req->retries++;
+			blk_mq_requeue_request(req,
+					!blk_mq_queue_stopped(req->q));
+			return;
+		}
+
+		if (blk_rq_is_passthrough(req))
+			error = req->errors;
+		else
+			error = nvme_error_status(req->errors);
+	}
+
+	blk_mq_end_request(req, error);
+}
+EXPORT_SYMBOL_GPL(nvme_complete_rq);
+
 void nvme_cancel_request(struct request *req, void *data, bool reserved)
 {
 	int status;
@@ -205,12 +234,6 @@ fail:
 	return NULL;
 }
 
-void nvme_requeue_req(struct request *req)
-{
-	blk_mq_requeue_request(req, !blk_mq_queue_stopped(req->q));
-}
-EXPORT_SYMBOL_GPL(nvme_requeue_req);
-
 struct request *nvme_alloc_request(struct request_queue *q,
 		struct nvme_command *cmd, unsigned int flags, int qid)
 {
@@ -1925,29 +1925,13 @@ nvme_fc_complete_rq(struct request *rq)
 {
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
 	struct nvme_fc_ctrl *ctrl = op->ctrl;
-	int error = 0, state;
+	int state;
 
 	state = atomic_xchg(&op->state, FCPOP_STATE_IDLE);
 
 	nvme_cleanup_cmd(rq);
 
 	nvme_fc_unmap_data(ctrl, rq, op);
-
-	if (unlikely(rq->errors)) {
-		if (nvme_req_needs_retry(rq, rq->errors)) {
-			rq->retries++;
-			nvme_requeue_req(rq);
-			goto put_ctrl;
-		}
-
-		if (blk_rq_is_passthrough(rq))
-			error = rq->errors;
-		else
-			error = nvme_error_status(rq->errors);
-	}
-
-	blk_mq_end_request(rq, error);
-
-put_ctrl:
+	nvme_complete_rq(rq);
 	nvme_fc_ctrl_put(ctrl);
 }
@@ -261,13 +261,7 @@ static inline int nvme_error_status(u16 status)
 	}
 }
 
-static inline bool nvme_req_needs_retry(struct request *req, u16 status)
-{
-	return !(status & NVME_SC_DNR || blk_noretry_request(req)) &&
-		(jiffies - req->start_time) < req->timeout &&
-		req->retries < nvme_max_retries;
-}
-
+void nvme_complete_rq(struct request *req);
 void nvme_cancel_request(struct request *req, void *data, bool reserved);
 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 		enum nvme_ctrl_state new_state);
@@ -302,7 +296,6 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
 #define NVME_QID_ANY -1
 struct request *nvme_alloc_request(struct request_queue *q,
 		struct nvme_command *cmd, unsigned int flags, int qid);
-void nvme_requeue_req(struct request *req);
 int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 		struct nvme_command *cmd);
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
@@ -628,34 +628,12 @@ out_free_cmd:
 	return ret;
 }
 
-static void nvme_complete_rq(struct request *req)
+static void nvme_pci_complete_rq(struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	struct nvme_dev *dev = iod->nvmeq->dev;
-	int error = 0;
 
-	nvme_unmap_data(dev, req);
-
-	if (unlikely(req->errors)) {
-		if (nvme_req_needs_retry(req, req->errors)) {
-			req->retries++;
-			nvme_requeue_req(req);
-			return;
-		}
-
-		if (blk_rq_is_passthrough(req))
-			error = req->errors;
-		else
-			error = nvme_error_status(req->errors);
-	}
-
-	if (unlikely(iod->aborted)) {
-		dev_warn(dev->ctrl.device,
-			"completing aborted command with status: %04x\n",
-			req->errors);
-	}
-
-	blk_mq_end_request(req, error);
+	nvme_unmap_data(iod->nvmeq->dev, req);
+	nvme_complete_rq(req);
 }
 
 /* We read the CQE phase first to check if the rest of the entry is valid */
@@ -1131,7 +1109,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
 
 static const struct blk_mq_ops nvme_mq_admin_ops = {
 	.queue_rq	= nvme_queue_rq,
-	.complete	= nvme_complete_rq,
+	.complete	= nvme_pci_complete_rq,
 	.init_hctx	= nvme_admin_init_hctx,
 	.exit_hctx	= nvme_admin_exit_hctx,
 	.init_request	= nvme_admin_init_request,
@@ -1140,7 +1118,7 @@ static const struct blk_mq_ops nvme_mq_admin_ops = {
 
 static const struct blk_mq_ops nvme_mq_ops = {
 	.queue_rq	= nvme_queue_rq,
-	.complete	= nvme_complete_rq,
+	.complete	= nvme_pci_complete_rq,
 	.init_hctx	= nvme_init_hctx,
 	.init_request	= nvme_init_request,
 	.map_queues	= nvme_pci_map_queues,
@@ -1518,25 +1518,9 @@ static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
 static void nvme_rdma_complete_rq(struct request *rq)
 {
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
-	struct nvme_rdma_queue *queue = req->queue;
-	int error = 0;
 
-	nvme_rdma_unmap_data(queue, rq);
-
-	if (unlikely(rq->errors)) {
-		if (nvme_req_needs_retry(rq, rq->errors)) {
-			rq->retries++;
-			nvme_requeue_req(rq);
-			return;
-		}
-
-		if (blk_rq_is_passthrough(rq))
-			error = rq->errors;
-		else
-			error = nvme_error_status(rq->errors);
-	}
-
-	blk_mq_end_request(rq, error);
+	nvme_rdma_unmap_data(req->queue, rq);
+	nvme_complete_rq(rq);
 }
 
 static const struct blk_mq_ops nvme_rdma_mq_ops = {
@@ -91,25 +91,10 @@ static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
 static void nvme_loop_complete_rq(struct request *req)
 {
 	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
-	int error = 0;
 
 	nvme_cleanup_cmd(req);
 	sg_free_table_chained(&iod->sg_table, true);
-
-	if (unlikely(req->errors)) {
-		if (nvme_req_needs_retry(req, req->errors)) {
-			req->retries++;
-			nvme_requeue_req(req);
-			return;
-		}
-
-		if (blk_rq_is_passthrough(req))
-			error = req->errors;
-		else
-			error = nvme_error_status(req->errors);
-	}
-
-	blk_mq_end_request(req, error);
+	nvme_complete_rq(req);
 }
 
 static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)