blk-mq: update hctx->nr_active in blk_mq_end_request_batch()
In case of shared tags and none io sched, batched completion still may
be run into, and hctx->nr_active is accounted when getting driver tag,
so it has to be updated in blk_mq_end_request_batch().
Otherwise, hctx->nr_active may become same with queue depth, then
hctx_may_queue() always return false, then io hang is caused.
Fixes the issue by updating the counter in batched way.
Reported-by: Shinichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Fixes: f794f3351f ("block: add support for blk_mq_end_request_batch()")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20211102153619.3627505-4-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent: 62ba0c008f
commit: 3b87c6ea67
@@ -818,6 +818,13 @@ static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
 {
 	struct request_queue *q = hctx->queue;

+	/*
+	 * All requests should have been marked as RQF_MQ_INFLIGHT, so
+	 * update hctx->nr_active in batch
+	 */
+	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
+		__blk_mq_sub_active_requests(hctx, nr_tags);
+
 	blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
 	percpu_ref_put_many(&q->q_usage_counter, nr_tags);
 }
@@ -225,12 +225,18 @@ static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
 		atomic_inc(&hctx->nr_active);
 }

-static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
+static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
+		int val)
 {
 	if (blk_mq_is_shared_tags(hctx->flags))
-		atomic_dec(&hctx->queue->nr_active_requests_shared_tags);
+		atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
 	else
-		atomic_dec(&hctx->nr_active);
+		atomic_sub(val, &hctx->nr_active);
+}
+
+static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
+{
+	__blk_mq_sub_active_requests(hctx, 1);
 }

 static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
Loading…
Reference in New Issue