Merge branch 'for-linus' of git://git.kernel.dk/linux-block

commit 04e904aa79

Pull block fixes from Jens Axboe:
 "A few fixes for the current series that should go into -rc4. This
  contains:

   - a fix for a potential corruption of un-started requests from Ming.

   - a blk-stat fix from Omar, ensuring we flush the stat batch before
     checking nr_samples.

   - a set of fixes from Sagi for the nvmeof family"

* 'for-linus' of git://git.kernel.dk/linux-block:
  blk-mq: don't complete un-started request in timeout handler
  nvme-loop: handle cpu unplug when re-establishing the controller
  nvme-rdma: handle cpu unplug when re-establishing the controller
  nvmet-rdma: Fix a possible uninitialized variable dereference
  nvmet: confirm sq percpu has scheduled and switched to atomic
  nvme-loop: fix a possible use-after-free when destroying the admin queue
  blk-stat: fix blk_stat_sum() if all samples are batched
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -697,17 +697,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 {
 	struct blk_mq_timeout_data *data = priv;
 
-	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
-		/*
-		 * If a request wasn't started before the queue was
-		 * marked dying, kill it here or it'll go unnoticed.
-		 */
-		if (unlikely(blk_queue_dying(rq->q))) {
-			rq->errors = -EIO;
-			blk_mq_end_request(rq, rq->errors);
-		}
+	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
 		return;
-	}
 
 	if (time_after_eq(jiffies, rq->deadline)) {
 		if (!blk_mark_rq_complete(rq))
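The rule the hunk enforces, reduced to a standalone user-space sketch: the timeout scanner must never complete a request it cannot prove was started, because an un-started request may still be owned (and freed or reused) by the submitting side, and completion may only be claimed once. Flag names mirror the kernel's, but everything below, including mark_complete(), is an illustrative model rather than kernel code:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	enum { REQ_STARTED = 1 << 0, REQ_COMPLETE = 1 << 1 };

	struct request {
		atomic_uint flags;
		unsigned long deadline;
	};

	/* Claim completion exactly once, as blk_mark_rq_complete() does. */
	static bool mark_complete(struct request *rq)
	{
		return !(atomic_fetch_or(&rq->flags, REQ_COMPLETE) & REQ_COMPLETE);
	}

	static void check_expired(struct request *rq, unsigned long now)
	{
		/*
		 * An un-started request may still be owned by the submitter
		 * and can be freed or reused at any time: never complete it
		 * from the timeout path.
		 */
		if (!(atomic_load(&rq->flags) & REQ_STARTED))
			return;

		if (now >= rq->deadline && mark_complete(rq))
			printf("request timed out\n");
	}

	int main(void)
	{
		struct request rq = { .deadline = 100 };

		atomic_init(&rq.flags, 0);
		check_expired(&rq, 200);		/* ignored: not started */
		atomic_fetch_or(&rq.flags, REQ_STARTED);
		check_expired(&rq, 200);		/* times out once */
		check_expired(&rq, 200);		/* already claimed */
		return 0;
	}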
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -30,11 +30,11 @@ static void blk_stat_flush_batch(struct blk_rq_stat *stat)
 
 static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
 {
+	blk_stat_flush_batch(src);
+
 	if (!src->nr_samples)
 		return;
 
-	blk_stat_flush_batch(src);
-
 	dst->min = min(dst->min, src->min);
 	dst->max = max(dst->max, src->max);
 
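Why the flush has to move above the nr_samples check: samples are staged in a per-stat batch buffer and only folded into nr_samples when the batch is flushed, so a stat whose samples are all still batched looks empty and gets dropped. A simplified, runnable stand-in (field names are borrowed for clarity; the real struct blk_rq_stat differs):

	#include <stdint.h>
	#include <stdio.h>

	#define BATCH 8

	struct rq_stat {
		uint64_t batch[BATCH];
		int nr_batch;
		uint64_t min, max, nr_samples;
	};

	static void flush_batch(struct rq_stat *s)
	{
		for (int i = 0; i < s->nr_batch; i++) {
			uint64_t v = s->batch[i];

			if (!s->nr_samples || v < s->min)
				s->min = v;
			if (!s->nr_samples || v > s->max)
				s->max = v;
			s->nr_samples++;
		}
		s->nr_batch = 0;
	}

	static void stat_sum(struct rq_stat *dst, struct rq_stat *src)
	{
		flush_batch(src);	/* must come first... */

		if (!src->nr_samples)	/* ...or fully batched samples are lost */
			return;

		if (!dst->nr_samples) {
			dst->min = src->min;
			dst->max = src->max;
		} else {
			if (src->min < dst->min)
				dst->min = src->min;
			if (src->max > dst->max)
				dst->max = src->max;
		}
		dst->nr_samples += src->nr_samples;
	}

	int main(void)
	{
		struct rq_stat dst = { 0 }, src = { 0 };

		src.batch[src.nr_batch++] = 42;	/* recorded, not yet flushed */
		stat_sum(&dst, &src);
		printf("samples=%llu min=%llu\n",	/* 1 and 42, not 0 */
		       (unsigned long long)dst.nr_samples,
		       (unsigned long long)dst.min);
		return 0;
	}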
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -343,8 +343,6 @@ static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
 	struct ib_device *ibdev = dev->dev;
 	int ret;
 
-	BUG_ON(queue_idx >= ctrl->queue_count);
-
 	ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command),
 			DMA_TO_DEVICE);
 	if (ret)
@@ -652,8 +650,22 @@ out_free_queues:
 
 static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
 {
+	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+	unsigned int nr_io_queues;
 	int i, ret;
 
+	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
+	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
+	if (ret)
+		return ret;
+
+	ctrl->queue_count = nr_io_queues + 1;
+	if (ctrl->queue_count < 2)
+		return 0;
+
+	dev_info(ctrl->ctrl.device,
+		"creating %d I/O queues.\n", nr_io_queues);
+
 	for (i = 1; i < ctrl->queue_count; i++) {
 		ret = nvme_rdma_init_queue(ctrl, i,
 				ctrl->ctrl.opts->queue_size);
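The shape of the new logic, as a plain C sketch: the host caps the request at the number of online CPUs, lets the target negotiate it down, and only then derives queue_count from what was actually granted. set_queue_count() below is a stub standing in for the nvme_set_queue_count() fabric round trip; all names are illustrative:

	#include <stdio.h>

	static unsigned int min_u(unsigned int a, unsigned int b)
	{
		return a < b ? a : b;
	}

	/* Stand-in for the fabric round trip: target grants up to 4 queues. */
	static int set_queue_count(unsigned int *count)
	{
		if (*count > 4)
			*count = 4;
		return 0;
	}

	static int init_io_queues(unsigned int requested, unsigned int online_cpus,
				  unsigned int *queue_count)
	{
		unsigned int nr_io_queues = min_u(requested, online_cpus);
		int ret = set_queue_count(&nr_io_queues);

		if (ret)
			return ret;

		*queue_count = nr_io_queues + 1;	/* +1 for the admin queue */
		if (*queue_count < 2)
			return 0;			/* no I/O queues granted */

		printf("creating %u I/O queues\n", nr_io_queues);
		return 0;
	}

	int main(void)
	{
		unsigned int qc;

		init_io_queues(8, 2, &qc);	/* clamped to 2 online CPUs */
		return 0;
	}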
@@ -1791,20 +1803,8 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
 
 static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
 {
-	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
 	int ret;
 
-	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
-	if (ret)
-		return ret;
-
-	ctrl->queue_count = opts->nr_io_queues + 1;
-	if (ctrl->queue_count < 2)
-		return 0;
-
-	dev_info(ctrl->ctrl.device,
-		"creating %d I/O queues.\n", opts->nr_io_queues);
-
 	ret = nvme_rdma_init_io_queues(ctrl);
 	if (ret)
 		return ret;
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -425,6 +425,13 @@ void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
 	ctrl->sqs[qid] = sq;
 }
 
+static void nvmet_confirm_sq(struct percpu_ref *ref)
+{
+	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
+
+	complete(&sq->confirm_done);
+}
+
 void nvmet_sq_destroy(struct nvmet_sq *sq)
 {
 	/*
@@ -433,7 +440,8 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
 	 */
 	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
 		nvmet_async_events_free(sq->ctrl);
-	percpu_ref_kill(&sq->ref);
+	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
+	wait_for_completion(&sq->confirm_done);
 	wait_for_completion(&sq->free_done);
 	percpu_ref_exit(&sq->ref);
 
@@ -461,6 +469,7 @@ int nvmet_sq_init(struct nvmet_sq *sq)
 		return ret;
 	}
 	init_completion(&sq->free_done);
+	init_completion(&sq->confirm_done);
 
 	return 0;
 }
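What the confirm_done completion buys: percpu_ref_kill() alone only requests the switch to atomic mode; the ref actually switches after an RCU grace period, so free_done cannot be trusted until that switch has happened. percpu_ref_kill_and_confirm() invokes a callback once the switch is complete, and nvmet_sq_destroy() now waits for it first. A condensed model with pthread-based completions; the kernel's RCU-deferred confirm is collapsed into a direct call, and everything here is illustrative:

	#include <pthread.h>
	#include <stdio.h>

	struct completion {
		pthread_mutex_t lock;
		pthread_cond_t cond;
		int done;
	};

	static void init_completion(struct completion *c)
	{
		pthread_mutex_init(&c->lock, NULL);
		pthread_cond_init(&c->cond, NULL);
		c->done = 0;
	}

	static void complete(struct completion *c)
	{
		pthread_mutex_lock(&c->lock);
		c->done = 1;
		pthread_cond_signal(&c->cond);
		pthread_mutex_unlock(&c->lock);
	}

	static void wait_for_completion(struct completion *c)
	{
		pthread_mutex_lock(&c->lock);
		while (!c->done)
			pthread_cond_wait(&c->cond, &c->lock);
		pthread_mutex_unlock(&c->lock);
	}

	struct sq {
		struct completion confirm_done;	/* ref switched to atomic mode */
		struct completion free_done;	/* last reference dropped */
	};

	static void confirm_sq(struct sq *sq)
	{
		complete(&sq->confirm_done);
	}

	/*
	 * Stand-in for percpu_ref_kill_and_confirm(): in the kernel, confirm
	 * runs after an RCU grace period, once per-cpu counters are folded
	 * into one atomic counter. Here both steps are called directly.
	 */
	static void ref_kill_and_confirm(struct sq *sq, void (*confirm)(struct sq *))
	{
		confirm(sq);
		complete(&sq->free_done);	/* pretend the last put just happened */
	}

	int main(void)
	{
		struct sq sq;

		init_completion(&sq.confirm_done);
		init_completion(&sq.free_done);
		ref_kill_and_confirm(&sq, confirm_sq);
		wait_for_completion(&sq.confirm_done);	/* atomic mode reached */
		wait_for_completion(&sq.free_done);	/* all requests drained */
		printf("sq torn down safely\n");
		return 0;
	}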
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -223,8 +223,6 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
 		struct nvme_loop_iod *iod, unsigned int queue_idx)
 {
-	BUG_ON(queue_idx >= ctrl->queue_count);
-
 	iod->req.cmd = &iod->cmd;
 	iod->req.rsp = &iod->rsp;
 	iod->queue = &ctrl->queues[queue_idx];
@@ -288,9 +286,9 @@ static struct blk_mq_ops nvme_loop_admin_mq_ops = {
 
 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
+	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
 	blk_cleanup_queue(ctrl->ctrl.admin_q);
 	blk_mq_free_tag_set(&ctrl->admin_tag_set);
-	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
 }
 
 static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
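The use-after-free in miniature: in-flight requests on the admin sq complete against the blk-mq queue, so the sq has to be drained before the queue and tag set are freed, not after. A toy model with illustrative names:

	#include <stdio.h>
	#include <stdlib.h>

	struct blk_queue { int alive; };

	struct sq {
		struct blk_queue *q;	/* in-flight work touches this */
		int inflight;
	};

	static void sq_destroy(struct sq *sq)
	{
		/* Drains: after this returns, nothing references sq->q. */
		while (sq->inflight) {
			if (!sq->q->alive) {
				fprintf(stderr, "use-after-free!\n");
				abort();
			}
			sq->inflight--;	/* complete one outstanding request */
		}
	}

	static void queue_cleanup(struct blk_queue *q)
	{
		q->alive = 0;
	}

	int main(void)
	{
		struct blk_queue q = { .alive = 1 };
		struct sq sq = { .q = &q, .inflight = 2 };

		sq_destroy(&sq);	/* drain first... */
		queue_cleanup(&q);	/* ...then free what it used */
		return 0;
	}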
@@ -314,6 +312,43 @@ free_ctrl:
 	kfree(ctrl);
 }
 
+static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+	int i;
+
+	for (i = 1; i < ctrl->queue_count; i++)
+		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+}
+
+static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+	unsigned int nr_io_queues;
+	int ret, i;
+
+	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
+	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
+	if (ret || !nr_io_queues)
+		return ret;
+
+	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);
+
+	for (i = 1; i <= nr_io_queues; i++) {
+		ctrl->queues[i].ctrl = ctrl;
+		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
+		if (ret)
+			goto out_destroy_queues;
+
+		ctrl->queue_count++;
+	}
+
+	return 0;
+
+out_destroy_queues:
+	nvme_loop_destroy_io_queues(ctrl);
+	return ret;
+}
+
 static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
 	int error;
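The unwind convention the new helpers rely on: ctrl->queue_count is only incremented after a queue initializes successfully, so nvme_loop_destroy_io_queues() can always walk 1..queue_count-1 without tracking how far init got. The same pattern in standalone form; sq_init() and sq_destroy() are stubs and all names are illustrative:

	#include <stdio.h>

	static int sq_init(int i)
	{
		return i == 3 ? -1 : 0;		/* simulate failure on queue 3 */
	}

	static void sq_destroy(int i)
	{
		printf("destroying queue %d\n", i);
	}

	static int queue_count = 1;		/* slot 0 is the admin queue */

	static void destroy_io_queues(void)
	{
		for (int i = 1; i < queue_count; i++)
			sq_destroy(i);
	}

	static int init_io_queues(int nr_io_queues)
	{
		for (int i = 1; i <= nr_io_queues; i++) {
			if (sq_init(i) != 0) {
				destroy_io_queues();	/* unwinds only 1..2 */
				return -1;
			}
			queue_count++;
		}
		return 0;
	}

	int main(void)
	{
		return init_io_queues(5) ? 1 : 0;
	}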
@@ -385,17 +420,13 @@ out_free_sq:
 
 static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
 {
-	int i;
-
 	nvme_stop_keep_alive(&ctrl->ctrl);
 
 	if (ctrl->queue_count > 1) {
 		nvme_stop_queues(&ctrl->ctrl);
 		blk_mq_tagset_busy_iter(&ctrl->tag_set,
 					nvme_cancel_request, &ctrl->ctrl);
-
-		for (i = 1; i < ctrl->queue_count; i++)
-			nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+		nvme_loop_destroy_io_queues(ctrl);
 	}
 
 	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
@@ -467,19 +498,14 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 	if (ret)
 		goto out_disable;
 
-	for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
-		ctrl->queues[i].ctrl = ctrl;
-		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
-		if (ret)
-			goto out_free_queues;
-
-		ctrl->queue_count++;
-	}
+	ret = nvme_loop_init_io_queues(ctrl);
+	if (ret)
+		goto out_destroy_admin;
 
-	for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
+	for (i = 1; i < ctrl->queue_count; i++) {
 		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
 		if (ret)
-			goto out_free_queues;
+			goto out_destroy_io;
 	}
 
 	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -492,9 +518,9 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 
 	return;
 
-out_free_queues:
-	for (i = 1; i < ctrl->queue_count; i++)
-		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+out_destroy_io:
+	nvme_loop_destroy_io_queues(ctrl);
+out_destroy_admin:
 	nvme_loop_destroy_admin_queue(ctrl);
 out_disable:
 	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
@@ -533,25 +559,12 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
 
 static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 {
-	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
 	int ret, i;
 
-	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
-	if (ret || !opts->nr_io_queues)
+	ret = nvme_loop_init_io_queues(ctrl);
+	if (ret)
 		return ret;
 
-	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
-		opts->nr_io_queues);
-
-	for (i = 1; i <= opts->nr_io_queues; i++) {
-		ctrl->queues[i].ctrl = ctrl;
-		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
-		if (ret)
-			goto out_destroy_queues;
-
-		ctrl->queue_count++;
-	}
-
 	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
 	ctrl->tag_set.ops = &nvme_loop_mq_ops;
 	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
@@ -575,7 +588,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 		goto out_free_tagset;
 	}
 
-	for (i = 1; i <= opts->nr_io_queues; i++) {
+	for (i = 1; i < ctrl->queue_count; i++) {
 		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
 		if (ret)
 			goto out_cleanup_connect_q;
@@ -588,8 +601,7 @@ out_cleanup_connect_q:
 out_free_tagset:
 	blk_mq_free_tag_set(&ctrl->tag_set);
 out_destroy_queues:
-	for (i = 1; i < ctrl->queue_count; i++)
-		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+	nvme_loop_destroy_io_queues(ctrl);
 	return ret;
 }
 
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -73,6 +73,7 @@ struct nvmet_sq {
 	u16			qid;
 	u16			size;
 	struct completion	free_done;
+	struct completion	confirm_done;
 };
 
 /**
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -703,11 +703,6 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
 {
 	u16 status;
 
-	cmd->queue = queue;
-	cmd->n_rdma = 0;
-	cmd->req.port = queue->port;
-
-
 	ib_dma_sync_single_for_cpu(queue->dev->device,
 		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
 		DMA_FROM_DEVICE);
@@ -760,9 +755,12 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 
 	cmd->queue = queue;
 	rsp = nvmet_rdma_get_rsp(queue);
+	rsp->queue = queue;
 	rsp->cmd = cmd;
 	rsp->flags = 0;
 	rsp->req.cmd = cmd->nvme_cmd;
+	rsp->req.port = queue->port;
+	rsp->n_rdma = 0;
 
 	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
 		unsigned long flags;
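The uninitialized-dereference shape being fixed: a command that arrives while the queue is not yet live is parked and later released on a path that never reaches nvmet_rdma_handle_command(), so every field the release path reads (n_rdma in particular) must be filled in at receive time, before the early-out. A reduced model, not the driver's actual structures:

	#include <stdio.h>

	struct rsp {
		int n_rdma;
		int port;
	};

	static void release_rsp(struct rsp *r)
	{
		/*
		 * The release path reads n_rdma even for parked commands;
		 * with the old placement it was uninitialized here.
		 */
		printf("releasing, n_rdma=%d\n", r->n_rdma);
	}

	static void recv_done(struct rsp *r, int queue_live, int queue_port)
	{
		r->n_rdma = 0;		/* initialize before any early-out */
		r->port = queue_port;

		if (!queue_live) {
			release_rsp(r);	/* fields are already valid */
			return;
		}
		/* ... normal handle_command path ... */
	}

	int main(void)
	{
		struct rsp r;

		recv_done(&r, 0, 4420);
		return 0;
	}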