blk: Fix lock inversion between ioc lock and bfqd lock
Lockdep complains about lock inversion between ioc->lock and bfqd->lock:
bfqd -> ioc:
put_io_context+0x33/0x90 -> ioc->lock grabbed
blk_mq_free_request+0x51/0x140
blk_put_request+0xe/0x10
blk_attempt_req_merge+0x1d/0x30
elv_attempt_insert_merge+0x56/0xa0
blk_mq_sched_try_insert_merge+0x4b/0x60
bfq_insert_requests+0x9e/0x18c0 -> bfqd->lock grabbed
blk_mq_sched_insert_requests+0xd6/0x2b0
blk_mq_flush_plug_list+0x154/0x280
blk_finish_plug+0x40/0x60
ext4_writepages+0x696/0x1320
do_writepages+0x1c/0x80
__filemap_fdatawrite_range+0xd7/0x120
sync_file_range+0xac/0xf0
ioc->bfqd:
bfq_exit_icq+0xa3/0xe0 -> bfqd->lock grabbed
put_io_context_active+0x78/0xb0 -> ioc->lock grabbed
exit_io_context+0x48/0x50
do_exit+0x7e9/0xdd0
do_group_exit+0x54/0xc0
To avoid this inversion we change blk_mq_sched_try_insert_merge() to not
free the merged request but rather leave that up to the caller, similarly
to blk_mq_sched_try_merge(). And in bfq_insert_requests() we make sure
to free all the merged requests after dropping bfqd->lock.
Fixes: aee69d78de ("block, bfq: introduce the BFQ-v0 I/O scheduler as an extra scheduler")
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Acked-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20210623093634.27879-3-jack@suse.cz
Signed-off-by: Jens Axboe <axboe@kernel.dk>
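
For illustration, a minimal sketch of what a scheduler insert path looks like under the new calling convention (sched_insert_request and sched_lock are illustrative names standing in for, e.g., bfq_insert_request and bfqd->lock; this is not the literal kernel code):

/*
 * Sketch only: merged requests are collected on a local list while the
 * scheduler lock is held and freed only after the lock is dropped, so
 * blk_mq_free_request() -> put_io_context() can take ioc->lock without
 * nesting it inside the scheduler lock.
 */
static void sched_insert_request(struct request_queue *q, struct request *rq)
{
	LIST_HEAD(free);	/* requests merged away, to be freed later */

	spin_lock_irq(&sched_lock);	/* the scheduler's private lock */
	if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
		spin_unlock_irq(&sched_lock);
		blk_mq_free_requests(&free);	/* no scheduler lock held here */
		return;
	}
	/* ... normal insertion proceeds under the lock ... */
	spin_unlock_irq(&sched_lock);
}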
commit fd2ef39cc9
parent a921c655f2
@@ -2345,9 +2345,9 @@ static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
 
 	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
 
+	spin_unlock_irq(&bfqd->lock);
 	if (free)
 		blk_mq_free_request(free);
-	spin_unlock_irq(&bfqd->lock);
 
 	return ret;
 }
@@ -5969,14 +5969,16 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 	struct bfq_queue *bfqq;
 	bool idle_timer_disabled = false;
 	unsigned int cmd_flags;
+	LIST_HEAD(free);
 
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
 	if (!cgroup_subsys_on_dfl(io_cgrp_subsys) && rq->bio)
 		bfqg_stats_update_legacy_io(q, rq);
 #endif
 	spin_lock_irq(&bfqd->lock);
-	if (blk_mq_sched_try_insert_merge(q, rq)) {
+	if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
 		spin_unlock_irq(&bfqd->lock);
+		blk_mq_free_requests(&free);
 		return;
 	}
 
@@ -846,18 +846,15 @@ static struct request *attempt_front_merge(struct request_queue *q,
 	return NULL;
 }
 
-int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
-			  struct request *next)
+/*
+ * Try to merge 'next' into 'rq'. Return true if the merge happened, false
+ * otherwise. The caller is responsible for freeing 'next' if the merge
+ * happened.
+ */
+bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
+			   struct request *next)
 {
-	struct request *free;
-
-	free = attempt_merge(q, rq, next);
-	if (free) {
-		blk_put_request(free);
-		return 1;
-	}
-
-	return 0;
+	return attempt_merge(q, rq, next);
 }
 
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
@@ -399,9 +399,10 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
 	return ret;
 }
 
-bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
+bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
+				   struct list_head *free)
 {
-	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
+	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
 
@@ -13,7 +13,8 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
 		unsigned int nr_segs, struct request **merged_request);
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
 		unsigned int nr_segs);
-bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
+bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
+				   struct list_head *free);
 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
@@ -302,6 +302,17 @@ static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
 	return NULL;
 }
 
+/* Free all requests on the list */
+static inline void blk_mq_free_requests(struct list_head *list)
+{
+	while (!list_empty(list)) {
+		struct request *rq = list_entry_rq(list->next);
+
+		list_del_init(&rq->queuelist);
+		blk_mq_free_request(rq);
+	}
+}
+
 /*
  * For shared tag users, we track the number of currently active users
  * and attempt to provide a fair share of the tag depth for each of them.
@@ -224,7 +224,7 @@ ssize_t part_timeout_store(struct device *, struct device_attribute *,
 void __blk_queue_split(struct bio **bio, unsigned int *nr_segs);
 int ll_back_merge_fn(struct request *req, struct bio *bio,
 		unsigned int nr_segs);
-int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
+bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 		struct request *next);
 unsigned int blk_recalc_rq_segments(struct request *rq);
 void blk_rq_set_mixed_merge(struct request *rq);
@@ -350,9 +350,11 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
  * we can append 'rq' to an existing request, so we can throw 'rq' away
  * afterwards.
  *
- * Returns true if we merged, false otherwise
+ * Returns true if we merged, false otherwise. 'free' will contain all
+ * requests that need to be freed.
  */
-bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
+bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
+			      struct list_head *free)
 {
 	struct request *__rq;
 	bool ret;
@@ -363,8 +365,10 @@ bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
 	/*
 	 * First try one-hit cache.
 	 */
-	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
+	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
+		list_add(&rq->queuelist, free);
 		return true;
+	}
 
 	if (blk_queue_noxmerges(q))
 		return false;
@@ -378,6 +382,7 @@ bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
 		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
 			break;
 
+		list_add(&rq->queuelist, free);
 		/* The merged request could be merged with others, try again */
 		ret = true;
 		rq = __rq;
@@ -719,6 +719,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 	struct dd_per_prio *per_prio;
 	enum dd_prio prio;
 	struct dd_blkcg *blkcg;
+	LIST_HEAD(free);
 
 	lockdep_assert_held(&dd->lock);
 
@@ -742,8 +743,10 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 	WARN_ON_ONCE(rq->elv.priv[0]);
 	rq->elv.priv[0] = blkcg;
 
-	if (blk_mq_sched_try_insert_merge(q, rq))
+	if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
+		blk_mq_free_requests(&free);
 		return;
+	}
 
 	trace_block_rq_insert(rq);
 
@@ -117,7 +117,8 @@ extern void elv_merge_requests(struct request_queue *, struct request *,
 		struct request *);
 extern void elv_merged_request(struct request_queue *, struct request *,
 		enum elv_merge);
-extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
+extern bool elv_attempt_insert_merge(struct request_queue *, struct request *,
+		struct list_head *);
 extern struct request *elv_former_request(struct request_queue *, struct request *);
 extern struct request *elv_latter_request(struct request_queue *, struct request *);
 void elevator_init_mq(struct request_queue *q);