blk-mq: don't handle non-flush requests in blk_insert_flush
Return to the normal blk_mq_submit_bio flow if the bio did not end up
actually being a flush because the device didn't support it.  Note that
this is basically impossible to hit without special instrumentation,
given that submit_bio_checks usually clears these flags already, so we'd
need a tight race to actually hit this code path.

With this change the call to blk_mq_run_hw_queue for the flush requests
can also be removed, given that the actual flush requests are always
issued via the requeue workqueue, which runs the queue unconditionally.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211019122553.2467817-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit d92ca9d834
parent dc5fc361d8
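Seen from the caller, the change turns blk_insert_flush() into a
predicate: it returns true when the flush state machine consumed the
request, and false when the request turned out not to need flush
handling and should continue down the normal submission path.  The
following is a standalone mock of that contract, not the kernel code:
struct request, the REQ_FSEQ_* values, and submit() are simplified
stand-ins.

#include <stdbool.h>
#include <stdio.h>

struct request { unsigned int policy; };	/* stand-in, not the kernel struct */

#define REQ_FSEQ_DATA		(1 << 0)	/* stand-ins for the flush-sequence flags */
#define REQ_FSEQ_PREFLUSH	(1 << 1)
#define REQ_FSEQ_POSTFLUSH	(1 << 2)

/* Mock: returns true when the flush machinery takes ownership of @rq. */
static bool blk_insert_flush(struct request *rq)
{
	/* Data but no pre/post flush action left: not actually a flush. */
	if ((rq->policy & REQ_FSEQ_DATA) &&
	    !(rq->policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH)))
		return false;
	return true;	/* consumed by the flush state machine */
}

static void submit(struct request *rq, bool is_flush_fua)
{
	/* The pattern blk_mq_submit_bio() adopts in this commit. */
	if (is_flush_fua && blk_insert_flush(rq))
		return;
	printf("continuing down the normal submission path\n");
}

int main(void)
{
	struct request data_only = { .policy = REQ_FSEQ_DATA };
	submit(&data_only, true);	/* falls through and prints */
	return 0;
}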
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -379,7 +379,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
  * @rq is being submitted.  Analyze what needs to be done and put it on the
  * right queue.
  */
-void blk_insert_flush(struct request *rq)
+bool blk_insert_flush(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 	unsigned long fflags = q->queue_flags;	/* may change, cache */
@@ -409,7 +409,7 @@ void blk_insert_flush(struct request *rq)
 	 */
 	if (!policy) {
 		blk_mq_end_request(rq, 0);
-		return;
+		return true;
 	}
 
 	BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */
@@ -420,10 +420,8 @@ void blk_insert_flush(struct request *rq)
 	 * for normal execution.
 	 */
 	if ((policy & REQ_FSEQ_DATA) &&
-	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
-		blk_mq_request_bypass_insert(rq, false, false);
-		return;
-	}
+	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH)))
+		return false;
 
 	/*
 	 * @rq should go through flush machinery.  Mark it part of flush
@@ -439,6 +437,8 @@ void blk_insert_flush(struct request *rq)
 	spin_lock_irq(&fq->mq_flush_lock);
 	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
 	spin_unlock_irq(&fq->mq_flush_lock);
+
+	return true;
 }
 
 /**
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2532,14 +2532,12 @@ void blk_mq_submit_bio(struct bio *bio)
 		return;
 	}
 
-	if (unlikely(is_flush_fua)) {
-		struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
-		/* Bypass scheduler for flush requests */
-		blk_insert_flush(rq);
-		blk_mq_run_hw_queue(hctx, true);
-	} else if (plug && (q->nr_hw_queues == 1 ||
-		   blk_mq_is_shared_tags(rq->mq_hctx->flags) ||
-		   q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
+	if (is_flush_fua && blk_insert_flush(rq))
+		return;
+
+	if (plug && (q->nr_hw_queues == 1 ||
+	    blk_mq_is_shared_tags(rq->mq_hctx->flags) ||
+	    q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
 		/*
 		 * Use plugging if we have a ->commit_rqs() hook as well, as
 		 * we know the driver uses bd->last in a smart fashion.
--- a/block/blk.h
+++ b/block/blk.h
@@ -236,7 +236,7 @@ void __blk_account_io_done(struct request *req, u64 now);
  */
 #define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
 
-void blk_insert_flush(struct request *rq);
+bool blk_insert_flush(struct request *rq);
 
 int elevator_switch_mq(struct request_queue *q,
 			  struct elevator_type *new_e);
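The other half of the change, dropping the explicit
blk_mq_run_hw_queue() call, follows from the commit message's
observation that the actual flush requests are always issued via the
requeue workqueue, which runs the queue unconditionally anyway.  A
mocked illustration of that argument follows; the types and functions
are stand-ins, not the kernel implementation.

#include <stdio.h>

struct hw_queue { const char *name; };	/* stand-in for struct blk_mq_hw_ctx */

static void run_hw_queue(struct hw_queue *hctx)
{
	printf("running %s\n", hctx->name);
}

/*
 * Stand-in for the requeue worker: after (re)issuing the queued flush
 * requests it always runs the hardware queue, so an extra run from the
 * submission path adds nothing.
 */
static void requeue_work(struct hw_queue *hctx)
{
	/* ... issue pending flush requests ... */
	run_hw_queue(hctx);	/* unconditional */
}

int main(void)
{
	struct hw_queue hctx = { .name = "hctx0" };
	requeue_work(&hctx);	/* the flush path already runs the queue */
	return 0;
}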