block: Reduce the amount of memory required per request queue

Instead of always allocating at least nr_cpu_ids hardware queues per request
queue, reallocate q->queue_hw_ctx if it has to grow. This patch improves
behavior that was introduced by commit 868f2f0b72 ("blk-mq: dynamic h/w
context count").

Cc: Keith Busch <keith.busch@intel.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit ac0d6b926e
parent a9a808084d
Author: Bart Van Assche <bvanassche@acm.org>
Date:   2019-10-25 09:50:09 -07:00
Committer: Jens Axboe <axboe@kernel.dk>

1 file changed, 17 insertions(+), 7 deletions(-)

--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2761,6 +2761,23 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 	int i, j, end;
 	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
 
+	if (q->nr_hw_queues < set->nr_hw_queues) {
+		struct blk_mq_hw_ctx **new_hctxs;
+
+		new_hctxs = kcalloc_node(set->nr_hw_queues,
+					 sizeof(*new_hctxs), GFP_KERNEL,
+					 set->numa_node);
+		if (!new_hctxs)
+			return;
+		if (hctxs)
+			memcpy(new_hctxs, hctxs, q->nr_hw_queues *
+			       sizeof(*hctxs));
+		q->queue_hw_ctx = new_hctxs;
+		q->nr_hw_queues = set->nr_hw_queues;
+		kfree(hctxs);
+		hctxs = new_hctxs;
+	}
+
 	/* protect against switching io scheduler */
 	mutex_lock(&q->sysfs_lock);
 	for (i = 0; i < set->nr_hw_queues; i++) {
@@ -2848,12 +2865,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	/* init q->mq_kobj and sw queues' kobjects */
 	blk_mq_sysfs_init(q);
 
-	q->queue_hw_ctx = kcalloc_node(nr_hw_queues(set),
-				       sizeof(*(q->queue_hw_ctx)), GFP_KERNEL,
-				       set->numa_node);
-	if (!q->queue_hw_ctx)
-		goto err_sys_init;
-
 	INIT_LIST_HEAD(&q->unused_hctx_list);
 	spin_lock_init(&q->unused_hctx_lock);
 
@@ -2901,7 +2912,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 err_hctxs:
 	kfree(q->queue_hw_ctx);
 	q->nr_hw_queues = 0;
-err_sys_init:
 	blk_mq_sysfs_deinit(q);
 err_poll:
 	blk_stat_free_callback(q->poll_cb);
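
Editor's note: the first hunk grows the hctx pointer array only when set->nr_hw_queues
exceeds the current q->nr_hw_queues, copies the old pointers across, and frees the old
array, so a queue never pays for more hardware-queue slots than it has ever needed.
Below is a minimal userspace sketch of that grow-only reallocation pattern; the names
(ctx_array, grow_ctx_array) are hypothetical, calloc()/free() stand in for
kcalloc_node()/kfree(), and the sysfs_lock serialization used in the kernel is omitted.

	/*
	 * Minimal sketch of the grow-only reallocation pattern above.
	 * ctx_array and grow_ctx_array are hypothetical names;
	 * calloc()/free() stand in for kcalloc_node()/kfree().
	 */
	#include <stdlib.h>
	#include <string.h>

	struct hw_ctx;				/* opaque per-hw-queue context */

	struct ctx_array {
		struct hw_ctx **ctxs;		/* like q->queue_hw_ctx */
		unsigned int nr;		/* like q->nr_hw_queues */
	};

	static int grow_ctx_array(struct ctx_array *a, unsigned int want)
	{
		struct hw_ctx **new_ctxs;

		if (a->nr >= want)
			return 0;		/* already big enough: no realloc */

		new_ctxs = calloc(want, sizeof(*new_ctxs));
		if (!new_ctxs)
			return -1;		/* keep the old, smaller array */
		if (a->ctxs)
			memcpy(new_ctxs, a->ctxs, a->nr * sizeof(*a->ctxs));
		free(a->ctxs);
		a->ctxs = new_ctxs;
		a->nr = want;
		return 0;
	}

As in the patch, shrinking is deliberately a no-op: the array only ever grows, which is
why the kernel hunk takes the reallocation path only when
q->nr_hw_queues < set->nr_hw_queues.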