NVMe: Fix locking on abort handling
The queues and device need to be locked when messing with them.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
This commit is contained in:
parent
c9d3bf8810
commit
7a509a6b07
|
@@ -1021,14 +1021,19 @@ static void nvme_abort_req(struct request *req)
|
||||||
struct nvme_command cmd;
|
struct nvme_command cmd;
|
||||||
|
|
||||||
if (!nvmeq->qid || cmd_rq->aborted) {
|
if (!nvmeq->qid || cmd_rq->aborted) {
|
||||||
|
unsigned long flags;
|
||||||
|
|
||||||
|
spin_lock_irqsave(&dev_list_lock, flags);
|
||||||
if (work_busy(&dev->reset_work))
|
if (work_busy(&dev->reset_work))
|
||||||
return;
|
goto out;
|
||||||
list_del_init(&dev->node);
|
list_del_init(&dev->node);
|
||||||
dev_warn(&dev->pci_dev->dev,
|
dev_warn(&dev->pci_dev->dev,
|
||||||
"I/O %d QID %d timeout, reset controller\n",
|
"I/O %d QID %d timeout, reset controller\n",
|
||||||
req->tag, nvmeq->qid);
|
req->tag, nvmeq->qid);
|
||||||
dev->reset_workfn = nvme_reset_failed_dev;
|
dev->reset_workfn = nvme_reset_failed_dev;
|
||||||
queue_work(nvme_workq, &dev->reset_work);
|
queue_work(nvme_workq, &dev->reset_work);
|
||||||
|
out:
|
||||||
|
spin_unlock_irqrestore(&dev_list_lock, flags);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -1096,25 +1101,29 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
|
||||||
struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
|
struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
|
||||||
struct nvme_queue *nvmeq = cmd->nvmeq;
|
struct nvme_queue *nvmeq = cmd->nvmeq;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The aborted req will be completed on receiving the abort req.
|
||||||
|
* We enable the timer again. If hit twice, it'll cause a device reset,
|
||||||
|
* as the device then is in a faulty state.
|
||||||
|
*/
|
||||||
|
int ret = BLK_EH_RESET_TIMER;
|
||||||
|
|
||||||
dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
|
dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
|
||||||
nvmeq->qid);
|
nvmeq->qid);
|
||||||
|
|
||||||
|
spin_lock_irq(&nvmeq->q_lock);
|
||||||
if (!nvmeq->dev->initialized) {
|
if (!nvmeq->dev->initialized) {
|
||||||
/*
|
/*
|
||||||
* Force cancelled command frees the request, which requires we
|
* Force cancelled command frees the request, which requires we
|
||||||
* return BLK_EH_NOT_HANDLED.
|
* return BLK_EH_NOT_HANDLED.
|
||||||
*/
|
*/
|
||||||
nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
|
nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
|
||||||
return BLK_EH_NOT_HANDLED;
|
ret = BLK_EH_NOT_HANDLED;
|
||||||
}
|
} else
|
||||||
nvme_abort_req(req);
|
nvme_abort_req(req);
|
||||||
|
spin_unlock_irq(&nvmeq->q_lock);
|
||||||
|
|
||||||
/*
|
return ret;
|
||||||
* The aborted req will be completed on receiving the abort req.
|
|
||||||
* We enable the timer again. If hit twice, it'll cause a device reset,
|
|
||||||
* as the device then is in a faulty state.
|
|
||||||
*/
|
|
||||||
return BLK_EH_RESET_TIMER;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void nvme_free_queue(struct nvme_queue *nvmeq)
|
static void nvme_free_queue(struct nvme_queue *nvmeq)
|
||||||
|
|
Loading…
Reference in New Issue