blk-mq: abstract out blk-mq-sched rq list iteration bio merge helper
No functional changes in this patch, just a prep patch for utilizing
this in an IO scheduler.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Reviewed-by: Omar Sandoval <osandov@fb.com>
parent 5de815a7ee
commit 9c55873464
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -268,19 +268,16 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
 EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
 
 /*
- * Reverse check our software queue for entries that we could potentially
- * merge with. Currently includes a hand-wavy stop count of 8, to not spend
- * too much time checking for merges.
+ * Iterate list of requests and see if we can merge this bio with any
+ * of them.
  */
-static bool blk_mq_attempt_merge(struct request_queue *q,
-				 struct blk_mq_ctx *ctx, struct bio *bio)
+bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
+			   struct bio *bio)
 {
 	struct request *rq;
 	int checked = 8;
 
-	lockdep_assert_held(&ctx->lock);
-
-	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
+	list_for_each_entry_reverse(rq, list, queuelist) {
 		bool merged = false;
 
 		if (!checked--)
@@ -305,13 +302,30 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
 			continue;
 		}
 
-		if (merged)
-			ctx->rq_merged++;
 		return merged;
 	}
 
 	return false;
 }
+EXPORT_SYMBOL_GPL(blk_mq_bio_list_merge);
+
+/*
+ * Reverse check our software queue for entries that we could potentially
+ * merge with. Currently includes a hand-wavy stop count of 8, to not spend
+ * too much time checking for merges.
+ */
+static bool blk_mq_attempt_merge(struct request_queue *q,
+				 struct blk_mq_ctx *ctx, struct bio *bio)
+{
+	lockdep_assert_held(&ctx->lock);
+
+	if (blk_mq_bio_list_merge(q, &ctx->rq_list, bio)) {
+		ctx->rq_merged++;
+		return true;
+	}
+
+	return false;
+}
 
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 {
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -259,7 +259,8 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 void blk_mq_complete_request(struct request *rq);
-
+bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
+			   struct bio *bio);
 bool blk_mq_queue_stopped(struct request_queue *q);
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
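For context, a minimal sketch of the kind of caller this prep patch enables:
an IO scheduler that keeps its own request lists can reuse the exported merge
walk instead of open-coding the bounded reverse iteration. The scheduler type,
field names, and hook below are invented for illustration; only
blk_mq_bio_list_merge(), hctx->queue, and hctx->sched_data are real kernel API.

/*
 * Hypothetical scheduler bio-merge hook (names are invented for this
 * sketch, not part of this patch).  The scheduler guards its private
 * request list with its own lock, then lets blk_mq_bio_list_merge()
 * do the bounded reverse walk and the actual merge attempts.
 */
struct my_sched_data {
	spinlock_t lock;
	struct list_head rq_list;	/* scheduler-private requests */
};

static bool my_sched_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
{
	struct my_sched_data *sd = hctx->sched_data;
	bool merged;

	spin_lock(&sd->lock);
	merged = blk_mq_bio_list_merge(hctx->queue, &sd->rq_list, bio);
	spin_unlock(&sd->lock);

	return merged;
}

Note that the ctx->rq_merged accounting stays behind in blk_mq_attempt_merge():
the new helper takes no lock and bumps no stats, which is what makes it
reusable outside the software-queue path.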