block: factor out a blk_try_enter_queue helper
Factor out the code that tries to get q_usage_counter without blocking into
a separate helper, both to improve code readability and to prepare for
splitting bio_queue_enter from blk_queue_enter.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Darrick J. Wong <djwong@kernel.org>
Link: https://lore.kernel.org/r/20210929071241.934472-3-hch@lst.de
Tested-by: Yi Zhang <yi.zhang@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 1f14a09890
parent cc9c884dd7
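The new helper gives blk-core.c a single non-blocking primitive for taking a
q_usage_counter reference. A minimal sketch of how a caller might use it;
example_submit_nowait is hypothetical, blk_try_enter_queue is static to
block/blk-core.c, and a successful enter must be balanced by blk_queue_exit():

	/*
	 * Hypothetical caller inside block/blk-core.c, for illustration
	 * only. On success, blk_try_enter_queue() holds a q_usage_counter
	 * reference that must be dropped with blk_queue_exit().
	 */
	static int example_submit_nowait(struct request_queue *q)
	{
		/* pm = false: treat this as a normal, non-PM request */
		if (!blk_try_enter_queue(q, false))
			return -EBUSY;	/* queue frozen, pm_only, or dying */

		/* ... do work against q here ... */

		blk_queue_exit(q);	/* puts q->q_usage_counter */
		return 0;
	}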
@@ -416,6 +416,30 @@ void blk_cleanup_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
 
+static bool blk_try_enter_queue(struct request_queue *q, bool pm)
+{
+	rcu_read_lock();
+	if (!percpu_ref_tryget_live(&q->q_usage_counter))
+		goto fail;
+
+	/*
+	 * The code that increments the pm_only counter must ensure that the
+	 * counter is globally visible before the queue is unfrozen.
+	 */
+	if (blk_queue_pm_only(q) &&
+	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
+		goto fail_put;
+
+	rcu_read_unlock();
+	return true;
+
+fail_put:
+	percpu_ref_put(&q->q_usage_counter);
+fail:
+	rcu_read_unlock();
+	return false;
+}
+
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
@@ -425,40 +449,18 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 {
 	const bool pm = flags & BLK_MQ_REQ_PM;
 
-	while (true) {
-		bool success = false;
-
-		rcu_read_lock();
-		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
-			/*
-			 * The code that increments the pm_only counter is
-			 * responsible for ensuring that that counter is
-			 * globally visible before the queue is unfrozen.
-			 */
-			if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
-			    !blk_queue_pm_only(q)) {
-				success = true;
-			} else {
-				percpu_ref_put(&q->q_usage_counter);
-			}
-		}
-		rcu_read_unlock();
-
-		if (success)
-			return 0;
-
+	while (!blk_try_enter_queue(q, pm)) {
 		if (flags & BLK_MQ_REQ_NOWAIT)
 			return -EBUSY;
 
 		/*
-		 * read pair of barrier in blk_freeze_queue_start(),
-		 * we need to order reading __PERCPU_REF_DEAD flag of
-		 * .q_usage_counter and reading .mq_freeze_depth or
-		 * queue dying flag, otherwise the following wait may
-		 * never return if the two reads are reordered.
+		 * read pair of barrier in blk_freeze_queue_start(), we need to
+		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
+		 * reading .mq_freeze_depth or queue dying flag, otherwise the
+		 * following wait may never return if the two reads are
+		 * reordered.
 		 */
 		smp_rmb();
-
 		wait_event(q->mq_freeze_wq,
 			   (!q->mq_freeze_depth &&
 			    blk_pm_resume_queue(pm, q)) ||
@@ -466,6 +468,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 		if (blk_queue_dying(q))
 			return -ENODEV;
 	}
+
+	return 0;
 }
 
 static inline int bio_queue_enter(struct bio *bio)
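A note on the barrier in the retry loop: the smp_rmb() pairs with
blk_freeze_queue_start(), which increments q->mq_freeze_depth before
percpu_ref_kill() marks the counter with __PERCPU_REF_DEAD. A toy userspace
model of that pairing in C11 atomics; every name here (dead, freeze_depth,
freezer, freeze_observed) is invented for illustration and none of this is
kernel code:

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Toy model: 'dead' stands in for __PERCPU_REF_DEAD on
	 * q_usage_counter, 'freeze_depth' for q->mq_freeze_depth. */
	static atomic_bool dead;
	static atomic_int freeze_depth;

	/* cf. blk_freeze_queue_start(): bump the depth, then kill the ref.
	 * The release store models the barrier implied by the kill. */
	static void freezer(void)
	{
		atomic_fetch_add_explicit(&freeze_depth, 1,
					  memory_order_relaxed);
		atomic_store_explicit(&dead, true, memory_order_release);
	}

	/* cf. the slow path of blk_queue_enter(): after failing to get a
	 * reference (DEAD observed), decide whether a freeze is in
	 * progress. */
	static bool freeze_observed(void)
	{
		if (!atomic_load_explicit(&dead, memory_order_relaxed))
			return false;	/* ref still live, no freeze seen */

		/* The smp_rmb() analogue: once DEAD has been observed, the
		 * freeze_depth load below is guaranteed to see the
		 * freezer's increment. */
		atomic_thread_fence(memory_order_acquire);
		return atomic_load_explicit(&freeze_depth,
					    memory_order_relaxed) != 0;
	}

Without the acquire fence, the freeze_depth load could be reordered before
the dead load and return a stale zero; in the kernel that is exactly the
case the comment warns about, where the subsequent wait_event() might never
return.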