block: fix plug list flushing for nomerge queues
Request queues with merging disabled will not flush the plug list after
BLK_MAX_REQUEST_COUNT requests have been queued, since the code relies on
blk_attempt_plug_merge to compute the request_count. Fix this by computing
the number of queued requests even for nomerge queues.

Signed-off-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit 0809e3ac62
parent 3380f4589f
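For context on the failure mode described above (this is not part of the patch): the flush that stops firing lives in the plugging path of blk_queue_bio(), which only flushes once request_count reaches BLK_MAX_REQUEST_COUNT. A paraphrased sketch of that gate, simplified from the kernel source of this era (tracepoint calls omitted):

	if (plug) {
		/*
		 * Flush the plug once BLK_MAX_REQUEST_COUNT requests have
		 * been queued; this only works if request_count is accurate,
		 * which is what the patch below fixes for nomerge queues.
		 */
		if (request_count >= BLK_MAX_REQUEST_COUNT)
			blk_flush_plug_list(plug, false);
		list_add_tail(&req->queuelist, &plug->list);
		blk_account_io_start(req, true);
	}

With merging disabled, the old code never called blk_attempt_plug_merge(), so request_count stayed at zero and the flush above never triggered; blk_plug_queued_count() restores an accurate count by walking the current task's plug list.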
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1594,6 +1594,30 @@ out:
 	return ret;
 }
 
+unsigned int blk_plug_queued_count(struct request_queue *q)
+{
+	struct blk_plug *plug;
+	struct request *rq;
+	struct list_head *plug_list;
+	unsigned int ret = 0;
+
+	plug = current->plug;
+	if (!plug)
+		goto out;
+
+	if (q->mq_ops)
+		plug_list = &plug->mq_list;
+	else
+		plug_list = &plug->list;
+
+	list_for_each_entry(rq, plug_list, queuelist) {
+		if (rq->q == q)
+			ret++;
+	}
+out:
+	return ret;
+}
+
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
 	req->cmd_type = REQ_TYPE_FS;
@@ -1641,9 +1665,11 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
 	 * Check if we can merge with the plugged list before grabbing
 	 * any locks.
 	 */
-	if (!blk_queue_nomerges(q) &&
-	    blk_attempt_plug_merge(q, bio, &request_count, NULL))
-		return;
+	if (!blk_queue_nomerges(q)) {
+		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
+			return;
+	} else
+		request_count = blk_plug_queued_count(q);
 
 	spin_lock_irq(q->queue_lock);
 
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1268,9 +1268,12 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	blk_queue_split(q, &bio, q->bio_split);
 
-	if (!is_flush_fua && !blk_queue_nomerges(q) &&
-	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
-		return;
+	if (!is_flush_fua && !blk_queue_nomerges(q)) {
+		if (blk_attempt_plug_merge(q, bio, &request_count,
+					   &same_queue_rq))
+			return;
+	} else
+		request_count = blk_plug_queued_count(q);
 
 	rq = blk_mq_map_request(q, bio, &data);
 	if (unlikely(!rq))
--- a/block/blk.h
+++ b/block/blk.h
@@ -86,6 +86,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 			    unsigned int *request_count,
 			    struct request **same_queue_rq);
+unsigned int blk_plug_queued_count(struct request_queue *q);
 
 void blk_account_io_start(struct request *req, bool new_io);
 void blk_account_io_completion(struct request *req, unsigned int bytes);