block: don't protect generic_make_request_checks with blk_queue_enter

Now that a063057d7c ("block: Fix a race between request queue removal and
the block cgroup controller") has been reverted, blkcg_exit_queue() is no
longer called from blk_cleanup_queue().

So there is no need to protect generic_make_request_checks() with
blk_queue_enter() any more, and the whole mess can be cleaned up.

37f9579f4c ("blk-mq: Avoid that submitting a bio concurrently with device
removal triggers a crash") is reverted.
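
For orientation, here is a condensed sketch of what generic_make_request()
looks like after this change. It is heavily simplified and not the verbatim
kernel code: the bio_list bookkeeping, current->bio_list handling and the
blk_queue_enter() failure path are elided; see the diff below for the real
change.

    blk_qc_t generic_make_request(struct bio *bio)
    {
            struct bio_list bio_list_on_stack[2];
            blk_qc_t ret = BLK_QC_T_NONE;

            /* The checks now run without holding a queue reference. */
            if (!generic_make_request_checks(bio))
                    goto out;

            /* ... bio_list setup elided ... */
            do {
                    struct request_queue *q = bio->bi_disk->queue;
                    blk_mq_req_flags_t flags = bio->bi_opf & REQ_NOWAIT ?
                                    BLK_MQ_REQ_NOWAIT : 0;

                    /* Take the queue reference only around the actual dispatch. */
                    if (likely(blk_queue_enter(q, flags) == 0)) {
                            ret = q->make_request_fn(q, bio);
                            blk_queue_exit(q);
                    }
                    /* else: the bio is completed with an error (not shown) */

                    bio = bio_list_pop(&bio_list_on_stack[0]);
            } while (bio);
    out:
            return ret;
    }

The point is that the reference is now taken per bio, for the queue that bio
actually targets, instead of being held across generic_make_request_checks()
and juggled whenever the loop switches to a different queue.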

Cc: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Ming Lei <ming.lei@redhat.com>, 2019-05-15 11:03:09 +08:00
Committer: Jens Axboe
commit fe2008640a (parent 47cdee29ef)
1 changed file with 6 additions and 31 deletions

block/blk-core.c

@@ -957,22 +957,8 @@ blk_qc_t generic_make_request(struct bio *bio)
 	 * yet.
 	 */
 	struct bio_list bio_list_on_stack[2];
-	blk_mq_req_flags_t flags = 0;
-	struct request_queue *q = bio->bi_disk->queue;
 	blk_qc_t ret = BLK_QC_T_NONE;
 
-	if (bio->bi_opf & REQ_NOWAIT)
-		flags = BLK_MQ_REQ_NOWAIT;
-	if (bio_flagged(bio, BIO_QUEUE_ENTERED))
-		blk_queue_enter_live(q);
-	else if (blk_queue_enter(q, flags) < 0) {
-		if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT))
-			bio_wouldblock_error(bio);
-		else
-			bio_io_error(bio);
-		return ret;
-	}
-
 	if (!generic_make_request_checks(bio))
 		goto out;
 
@@ -1009,22 +995,11 @@ blk_qc_t generic_make_request(struct bio *bio)
 	bio_list_init(&bio_list_on_stack[0]);
 	current->bio_list = bio_list_on_stack;
 	do {
-		bool enter_succeeded = true;
-
-		if (unlikely(q != bio->bi_disk->queue)) {
-			if (q)
-				blk_queue_exit(q);
-			q = bio->bi_disk->queue;
-			flags = 0;
-			if (bio->bi_opf & REQ_NOWAIT)
-				flags = BLK_MQ_REQ_NOWAIT;
-			if (blk_queue_enter(q, flags) < 0) {
-				enter_succeeded = false;
-				q = NULL;
-			}
-		}
-
-		if (enter_succeeded) {
+		struct request_queue *q = bio->bi_disk->queue;
+		blk_mq_req_flags_t flags = bio->bi_opf & REQ_NOWAIT ?
+			BLK_MQ_REQ_NOWAIT : 0;
+
+		if (likely(blk_queue_enter(q, flags) == 0)) {
 			struct bio_list lower, same;
 
 			/* Create a fresh bio_list for all subordinate requests */
@@ -1032,6 +1007,8 @@ blk_qc_t generic_make_request(struct bio *bio)
 			bio_list_init(&bio_list_on_stack[0]);
 			ret = q->make_request_fn(q, bio);
 
+			blk_queue_exit(q);
+
 			/* sort new bios into those for a lower level
 			 * and those for the same level
 			 */
@@ -1058,8 +1035,6 @@ blk_qc_t generic_make_request(struct bio *bio)
 	current->bio_list = NULL; /* deactivate */
 
 out:
-	if (q)
-		blk_queue_exit(q);
 	return ret;
 }
 EXPORT_SYMBOL(generic_make_request);