blk-mq: improve the blk_mq_init_allocated_queue interface
Don't return the passed in request_queue but a normal error code, and
drop the elevator_init argument in favor of just calling elevator_init_mq
directly from dm-rq.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Link: https://lore.kernel.org/r/20210602065345.355274-3-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent cdb14e0f77
commit 26a9750aa8
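To make the new calling convention concrete before reading the diff, here is a rough caller-side sketch, not code from this commit: blk_mq_init_allocated_queue() now returns 0 or a negative errno instead of a request_queue pointer, and elevator setup becomes an explicit follow-up call (as dm-rq now does). struct my_dev and my_init_queue() are invented for illustration; the blk_* and elevator_init_mq() calls mirror the code in the diff below.

	#include <linux/blk-mq.h>
	#include <linux/blkdev.h>
	#include <linux/elevator.h>

	/* Hypothetical driver state, for illustration only. */
	struct my_dev {
		struct blk_mq_tag_set	tag_set;
		struct request_queue	*queue;
	};

	static int my_init_queue(struct my_dev *dev)
	{
		int ret;

		/* Allocation is still a separate step and returns NULL on failure. */
		dev->queue = blk_alloc_queue(dev->tag_set.numa_node);
		if (!dev->queue)
			return -ENOMEM;

		/* New convention: 0 on success, negative errno on failure. */
		ret = blk_mq_init_allocated_queue(&dev->tag_set, dev->queue);
		if (ret) {
			blk_cleanup_queue(dev->queue);
			return ret;
		}

		/*
		 * The elevator_init bool argument is gone; a caller that wants
		 * a default I/O scheduler (as dm-rq does) now calls
		 * elevator_init_mq() itself.
		 */
		elevator_init_mq(dev->queue);
		return 0;
	}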
diff --git a/block/blk-mq.c b/block/blk-mq.c
@@ -3115,21 +3115,18 @@ void blk_mq_release(struct request_queue *q)
 struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
 		void *queuedata)
 {
-	struct request_queue *uninit_q, *q;
+	struct request_queue *q;
+	int ret;
 
-	uninit_q = blk_alloc_queue(set->numa_node);
-	if (!uninit_q)
+	q = blk_alloc_queue(set->numa_node);
+	if (!q)
 		return ERR_PTR(-ENOMEM);
-	uninit_q->queuedata = queuedata;
-
-	/*
-	 * Initialize the queue without an elevator. device_add_disk() will do
-	 * the initialization.
-	 */
-	q = blk_mq_init_allocated_queue(set, uninit_q, false);
-	if (IS_ERR(q))
-		blk_cleanup_queue(uninit_q);
-
+	q->queuedata = queuedata;
+	ret = blk_mq_init_allocated_queue(set, q);
+	if (ret) {
+		blk_cleanup_queue(q);
+		return ERR_PTR(ret);
+	}
 	return q;
 }
 EXPORT_SYMBOL_GPL(blk_mq_init_queue_data);
@@ -3273,9 +3270,8 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 	mutex_unlock(&q->sysfs_lock);
 }
 
-struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
-						  struct request_queue *q,
-						  bool elevator_init)
+int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+		struct request_queue *q)
 {
 	/* mark the queue as mq asap */
 	q->mq_ops = set->ops;
@@ -3325,11 +3321,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 	blk_mq_add_queue_tag_set(set, q);
 	blk_mq_map_swqueue(q);
-
-	if (elevator_init)
-		elevator_init_mq(q);
-
-	return q;
+	return 0;
 
 err_hctxs:
 	kfree(q->queue_hw_ctx);
@@ -3340,7 +3332,7 @@ err_poll:
 	q->poll_cb = NULL;
 err_exit:
 	q->mq_ops = NULL;
-	return ERR_PTR(-ENOMEM);
+	return -ENOMEM;
 }
 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
 
diff --git a/block/blk.h b/block/blk.h
@@ -192,7 +192,6 @@ void blk_account_io_done(struct request *req, u64 now);
 
 void blk_insert_flush(struct request *rq);
 
-void elevator_init_mq(struct request_queue *q);
 int elevator_switch_mq(struct request_queue *q,
 			struct elevator_type *new_e);
 void __elevator_exit(struct request_queue *, struct elevator_queue *);
diff --git a/block/elevator.c b/block/elevator.c
@@ -693,6 +693,7 @@ void elevator_init_mq(struct request_queue *q)
 		elevator_put(e);
 	}
 }
+EXPORT_SYMBOL_GPL(elevator_init_mq);	/* only for dm-rq */
 
 /*
  * switch to new_e io scheduler. be careful not to introduce deadlocks -
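The export matters because dm-rq can be built as a module: with the elevator_init argument gone, dm-rq calls elevator_init_mq() directly and needs the EXPORT_SYMBOL_GPL to link against it.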
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
@@ -530,7 +530,6 @@ static const struct blk_mq_ops dm_mq_ops = {
 
 int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
 {
-	struct request_queue *q;
 	struct dm_target *immutable_tgt;
 	int err;
 
@@ -557,12 +556,10 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
 	if (err)
 		goto out_kfree_tag_set;
 
-	q = blk_mq_init_allocated_queue(md->tag_set, md->queue, true);
-	if (IS_ERR(q)) {
-		err = PTR_ERR(q);
+	err = blk_mq_init_allocated_queue(md->tag_set, md->queue);
+	if (err)
 		goto out_tag_set;
-	}
-
+	elevator_init_mq(md->queue);
 	return 0;
 
 out_tag_set:
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
@@ -429,9 +429,8 @@ enum {
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
 struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
 		void *queuedata);
-struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
-						  struct request_queue *q,
-						  bool elevator_init);
+int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+		struct request_queue *q);
 struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
 						const struct blk_mq_ops *ops,
 						unsigned int queue_depth,
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
@@ -120,6 +120,7 @@ extern void elv_merged_request(struct request_queue *, struct request *,
 extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
 extern struct request *elv_former_request(struct request_queue *, struct request *);
 extern struct request *elv_latter_request(struct request_queue *, struct request *);
+void elevator_init_mq(struct request_queue *q);
 
 /*
  * io scheduler registration