nvme-loop: handle cpu unplug when re-establishing the controller

If a cpu unplug event has occurred, we need to take the minimum
of the provided nr_io_queues and the number of online cpus,
otherwise we won't be able to connect the queues, as the blk-mq
mapping won't dispatch to them.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Sagi Grimberg 2017-03-13 13:27:51 +02:00
parent c248c64387
commit 945dd5bacc
1 changed file with 50 additions and 38 deletions
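
For context, the fix boils down to clamping the requested I/O queue count to the number of CPUs that are online when the controller is (re)established, and then connecting only the queues that were actually set up (ctrl->queue_count) rather than the originally requested opts->nr_io_queues. Below is a minimal userspace sketch of that clamp, using sysconf(_SC_NPROCESSORS_ONLN) as a stand-in for the kernel's num_online_cpus(); the helper name clamp_io_queues() is invented for illustration, while the real logic is the new nvme_loop_init_io_queues() in the diff that follows.

#include <stdio.h>
#include <unistd.h>

/*
 * Illustration only: mirrors the clamp done in nvme_loop_init_io_queues(),
 * i.e. never ask for more I/O queues than there are online CPUs, because
 * blk-mq will not map any hardware context to an offline CPU.
 */
static unsigned int clamp_io_queues(unsigned int requested)
{
        long online = sysconf(_SC_NPROCESSORS_ONLN);

        if (online < 1)
                online = 1;     /* be defensive if sysconf() fails */
        return requested < (unsigned int)online ?
                        requested : (unsigned int)online;
}

int main(void)
{
        unsigned int requested = 8;     /* e.g. the nr_io_queues= connect option */

        printf("requested %u, creating %u I/O queues\n",
               requested, clamp_io_queues(requested));
        return 0;
}

Queues beyond the online-CPU count would be unreachable anyway, since blk-mq only dispatches to queues mapped to online CPUs, so attempting to connect them can only fail.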


--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -223,8 +223,6 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
                struct nvme_loop_iod *iod, unsigned int queue_idx)
 {
-        BUG_ON(queue_idx >= ctrl->queue_count);
-
         iod->req.cmd = &iod->cmd;
         iod->req.rsp = &iod->rsp;
         iod->queue = &ctrl->queues[queue_idx];
@@ -314,6 +312,43 @@ free_ctrl:
         kfree(ctrl);
 }
 
+static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+        int i;
+
+        for (i = 1; i < ctrl->queue_count; i++)
+                nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+}
+
+static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+        unsigned int nr_io_queues;
+        int ret, i;
+
+        nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
+        ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
+        if (ret || !nr_io_queues)
+                return ret;
+
+        dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);
+
+        for (i = 1; i <= nr_io_queues; i++) {
+                ctrl->queues[i].ctrl = ctrl;
+                ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
+                if (ret)
+                        goto out_destroy_queues;
+
+                ctrl->queue_count++;
+        }
+
+        return 0;
+
+out_destroy_queues:
+        nvme_loop_destroy_io_queues(ctrl);
+        return ret;
+}
+
 static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
         int error;
@@ -385,17 +420,13 @@ out_free_sq:
 
 static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
 {
-        int i;
-
         nvme_stop_keep_alive(&ctrl->ctrl);
 
         if (ctrl->queue_count > 1) {
                 nvme_stop_queues(&ctrl->ctrl);
                 blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                         nvme_cancel_request, &ctrl->ctrl);
-
-                for (i = 1; i < ctrl->queue_count; i++)
-                        nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+                nvme_loop_destroy_io_queues(ctrl);
         }
 
         if (ctrl->ctrl.state == NVME_CTRL_LIVE)
@@ -467,19 +498,14 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
         if (ret)
                 goto out_disable;
 
-        for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
-                ctrl->queues[i].ctrl = ctrl;
-                ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
-                if (ret)
-                        goto out_free_queues;
-
-                ctrl->queue_count++;
-        }
+        ret = nvme_loop_init_io_queues(ctrl);
+        if (ret)
+                goto out_destroy_admin;
 
-        for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
+        for (i = 1; i < ctrl->queue_count; i++) {
                 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
                 if (ret)
-                        goto out_free_queues;
+                        goto out_destroy_io;
         }
 
         changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -492,9 +518,9 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 
         return;
 
-out_free_queues:
-        for (i = 1; i < ctrl->queue_count; i++)
-                nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+out_destroy_io:
+        nvme_loop_destroy_io_queues(ctrl);
+out_destroy_admin:
         nvme_loop_destroy_admin_queue(ctrl);
 out_disable:
         dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
@@ -533,25 +559,12 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
 
 static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 {
-        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
         int ret, i;
 
-        ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
-        if (ret || !opts->nr_io_queues)
-                return ret;
-
-        dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
-                opts->nr_io_queues);
-
-        for (i = 1; i <= opts->nr_io_queues; i++) {
-                ctrl->queues[i].ctrl = ctrl;
-                ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
-                if (ret)
-                        goto out_destroy_queues;
-
-                ctrl->queue_count++;
-        }
+        ret = nvme_loop_init_io_queues(ctrl);
+        if (ret)
+                return ret;
 
         memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
         ctrl->tag_set.ops = &nvme_loop_mq_ops;
         ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
@@ -575,7 +588,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
                 goto out_free_tagset;
         }
 
-        for (i = 1; i <= opts->nr_io_queues; i++) {
+        for (i = 1; i < ctrl->queue_count; i++) {
                 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
                 if (ret)
                         goto out_cleanup_connect_q;
@@ -588,8 +601,7 @@ out_cleanup_connect_q:
 out_free_tagset:
         blk_mq_free_tag_set(&ctrl->tag_set);
 out_destroy_queues:
-        for (i = 1; i < ctrl->queue_count; i++)
-                nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+        nvme_loop_destroy_io_queues(ctrl);
         return ret;
 }