blk-mq: move the call to blk_queue_enter_live out of blk_mq_get_request

Move the blk_queue_enter_live calls into the callers, where they can
successively be cleaned up.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 870c153cf0
commit a5ea581105
1 file changed, 16 insertions(+), 11 deletions(-)

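The effect is easiest to see in blk_mq_alloc_request(): the extra q_usage_counter reference that blk_mq_get_request() used to take internally (and drop again on its own failure path) is now taken and released by the caller. Below is a condensed sketch of the function as it stands after this patch; the request-field initialization is elided, and the reference-counting comments are added here for illustration, not taken from the source:

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags);	/* first q_usage_counter reference */
	if (ret)
		return ERR_PTR(ret);

	blk_queue_enter_live(q);	/* second reference, now taken by the caller */
	rq = blk_mq_get_request(q, NULL, &alloc_data);
	blk_queue_exit(q);		/* drop the blk_queue_enter() reference */
	if (!rq)
		goto out_queue_exit;

	/* ... rq->__data_len and friends initialized here ... */
	return rq;	/* the live reference stays with rq until blk_mq_free_request() */
out_queue_exit:
	blk_queue_exit(q);	/* drop the blk_queue_enter_live() reference */
	return ERR_PTR(-EWOULDBLOCK);
}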
diff --git a/block/blk-mq.c b/block/blk-mq.c
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -342,8 +342,6 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 	bool clear_ctx_on_error = false;
 	u64 alloc_time_ns = 0;
 
-	blk_queue_enter_live(q);
-
 	/* alloc_time includes depth and tag waits */
 	if (blk_queue_rq_alloc_time(q))
 		alloc_time_ns = ktime_get_ns();
@@ -379,7 +377,6 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 	if (tag == BLK_MQ_TAG_FAIL) {
 		if (clear_ctx_on_error)
 			data->ctx = NULL;
-		blk_queue_exit(q);
 		return NULL;
 	}
 
@@ -409,16 +406,19 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 	if (ret)
 		return ERR_PTR(ret);
 
+	blk_queue_enter_live(q);
 	rq = blk_mq_get_request(q, NULL, &alloc_data);
 	blk_queue_exit(q);
-
 	if (!rq)
-		return ERR_PTR(-EWOULDBLOCK);
+		goto out_queue_exit;
 
 	rq->__data_len = 0;
 	rq->__sector = (sector_t) -1;
 	rq->bio = rq->biotail = NULL;
 	return rq;
+out_queue_exit:
+	blk_queue_exit(q);
+	return ERR_PTR(-EWOULDBLOCK);
 }
 EXPORT_SYMBOL(blk_mq_alloc_request);
@@ -450,21 +450,24 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	 * Check if the hardware context is actually mapped to anything.
 	 * If not tell the caller that it should skip this queue.
 	 */
+	ret = -EXDEV;
 	alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
-	if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
-		blk_queue_exit(q);
-		return ERR_PTR(-EXDEV);
-	}
+	if (!blk_mq_hw_queue_mapped(alloc_data.hctx))
+		goto out_queue_exit;
 	cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
 	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
 
+	ret = -EWOULDBLOCK;
+	blk_queue_enter_live(q);
 	rq = blk_mq_get_request(q, NULL, &alloc_data);
 	blk_queue_exit(q);
-
 	if (!rq)
-		return ERR_PTR(-EWOULDBLOCK);
+		goto out_queue_exit;
 
 	return rq;
+out_queue_exit:
+	blk_queue_exit(q);
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
@@ -2043,8 +2046,10 @@ blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	rq_qos_throttle(q, bio);
 
 	data.cmd_flags = bio->bi_opf;
+	blk_queue_enter_live(q);
 	rq = blk_mq_get_request(q, bio, &data);
 	if (unlikely(!rq)) {
+		blk_queue_exit(q);
 		rq_qos_cleanup(q, bio);
 		if (bio->bi_opf & REQ_NOWAIT)
 			bio_wouldblock_error(bio);
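
Note the asymmetry in the blk_mq_make_request() hunk above: unlike the two allocation helpers, it takes no blk_queue_enter() reference of its own (generic_make_request() already holds one when it calls into here), so it only needs a blk_queue_exit() on the failure path; on success, the blk_queue_enter_live() reference stays with the request until blk_mq_free_request(). For reference, both helpers reduce to a get/put on the queue's percpu usage counter; paraphrased, not quoted verbatim, from block/blk.h and block/blk-core.c of this era:

/*
 * Paraphrased sketch: the real blk_queue_enter_live() carries a comment
 * explaining that the caller must already hold a q_usage_counter
 * reference, which is why no frozen/dying check is needed here (unlike
 * blk_queue_enter()).
 */
static inline void blk_queue_enter_live(struct request_queue *q)
{
	percpu_ref_get(&q->q_usage_counter);
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}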