block: Make q_usage_counter also track legacy requests
This patch makes it possible to pause request allocation for the legacy
block layer by calling blk_mq_freeze_queue() and blk_mq_unfreeze_queue().

Signed-off-by: Ming Lei <ming.lei@redhat.com>
[ bvanassche: Combined two patches into one, edited a comment and made
  sure REQ_NOWAIT is handled properly in blk_old_get_request() ]
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Tested-by: Martin Steigerwald <martin@lichtvoll.de>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 055f6e18e0
parent eb619fdb2d
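Note (illustration, not part of the patch): with q_usage_counter covering
legacy requests, a queue can be quiesced the same way regardless of whether
it uses blk-mq. A minimal sketch, assuming a hypothetical driver-side
helper named example_reconfigure():

	#include <linux/blkdev.h>
	#include <linux/blk-mq.h>

	static void example_reconfigure(struct request_queue *q)
	{
		blk_mq_freeze_queue(q);		/* wait for q_usage_counter to drain */
		/* no new requests, legacy or blk-mq, can be allocated here */
		blk_mq_unfreeze_queue(q);	/* resume request allocation */
	}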
block/blk-core.c
@@ -612,6 +612,9 @@ void blk_set_queue_dying(struct request_queue *q)
 		}
 		spin_unlock_irq(q->queue_lock);
 	}
+
+	/* Make blk_queue_enter() reexamine the DYING flag. */
+	wake_up_all(&q->mq_freeze_wq);
 }
 EXPORT_SYMBOL_GPL(blk_set_queue_dying);
 
@@ -1398,16 +1401,22 @@ static struct request *blk_old_get_request(struct request_queue *q,
 					   unsigned int op, gfp_t gfp_mask)
 {
 	struct request *rq;
+	int ret = 0;
 
 	WARN_ON_ONCE(q->mq_ops);
 
 	/* create ioc upfront */
 	create_io_context(gfp_mask, q->node);
 
+	ret = blk_queue_enter(q, !(gfp_mask & __GFP_DIRECT_RECLAIM) ||
+			      (op & REQ_NOWAIT));
+	if (ret)
+		return ERR_PTR(ret);
 	spin_lock_irq(q->queue_lock);
 	rq = get_request(q, op, NULL, gfp_mask);
 	if (IS_ERR(rq)) {
 		spin_unlock_irq(q->queue_lock);
+		blk_queue_exit(q);
 		return rq;
 	}
 
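Note (illustration, not part of the patch): the reference taken by
blk_queue_enter() above is released either on the error path via
blk_queue_exit(), or when the request is freed in __blk_put_request() (see
the next hunk). From a caller's point of view, assuming a queue q:

	struct request *rq;

	/* allocation now pins q_usage_counter for legacy queues too */
	rq = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL);
	if (!IS_ERR(rq))
		blk_put_request(rq);	/* drops the q_usage_counter reference */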
@@ -1579,6 +1588,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 		blk_free_request(rl, req);
 		freed_request(rl, sync, rq_flags);
 		blk_put_rl(rl);
+		blk_queue_exit(q);
 	}
 }
 EXPORT_SYMBOL_GPL(__blk_put_request);
 
@@ -1860,8 +1870,10 @@ get_rq:
 	 * Grab a free request. This is might sleep but can not fail.
 	 * Returns with the queue unlocked.
 	 */
+	blk_queue_enter_live(q);
 	req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
 	if (IS_ERR(req)) {
+		blk_queue_exit(q);
 		__wbt_done(q->rq_wb, wb_acct);
 		if (PTR_ERR(req) == -ENOMEM)
 			bio->bi_status = BLK_STS_RESOURCE;
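Note (context, not part of the patch): blk_queue_bio() runs under
generic_make_request(), which already holds a q_usage_counter reference, so
the non-failing variant is used here. At the time, blk_queue_enter_live()
in block/blk.h was roughly:

	static inline void blk_queue_enter_live(struct request_queue *q)
	{
		/*
		 * The submission path already holds a reference, so the
		 * counter cannot reach zero here; taking another reference
		 * cannot fail.
		 */
		percpu_ref_get(&q->q_usage_counter);
	}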
block/blk-mq.c
@@ -126,7 +126,8 @@ void blk_freeze_queue_start(struct request_queue *q)
 	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
 	if (freeze_depth == 1) {
 		percpu_ref_kill(&q->q_usage_counter);
-		blk_mq_run_hw_queues(q, false);
+		if (q->mq_ops)
+			blk_mq_run_hw_queues(q, false);
 	}
 }
 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
 
@@ -256,13 +257,6 @@ void blk_mq_wake_waiters(struct request_queue *q)
 	queue_for_each_hw_ctx(q, hctx, i)
 		if (blk_mq_hw_queue_mapped(hctx))
 			blk_mq_tag_wakeup_all(hctx->tags, true);
-
-	/*
-	 * If we are called because the queue has now been marked as
-	 * dying, we need to ensure that processes currently waiting on
-	 * the queue are notified as well.
-	 */
-	wake_up_all(&q->mq_freeze_wq);
 }
 
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)