block: make bio_queue_enter() fast-path available inline
Just a prep patch for shifting the queue enter logic. This moves the
expected fast path inline, and leaves __bio_queue_enter() as an
out-of-line function call. We don't want to inline the latter, as it's
mostly slow path code.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit c98cb5bbda
parent 71539717c1
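Note: the change below is the standard split between an inline fast path and an
out-of-line slow path. A minimal, self-contained sketch of that general pattern
follows; all identifiers in it (try_enter_fast, __enter_slow, enter) are made up
for illustration and are not kernel APIs — see the actual diff below for the
kernel versions.

/*
 * Sketch of the inline fast-path / out-of-line slow-path split.
 * Names are illustrative only.
 */
#include <stdbool.h>

bool try_enter_fast(void *res);	/* cheap check that almost always succeeds */
int __enter_slow(void *res);	/* out of line: the rare, heavyweight path */

static inline int enter(void *res)
{
	if (try_enter_fast(res))	/* expected case */
		return 0;
	return __enter_slow(res);	/* rare case, kept out of the caller's text */
}

In the patch itself, blk_try_enter_queue() and bio_queue_enter() play the inline
roles and move to block/blk.h, while __bio_queue_enter() keeps the slow-path loop
out of line in blk-core.c.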
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -386,30 +386,6 @@ void blk_cleanup_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
 
-static bool blk_try_enter_queue(struct request_queue *q, bool pm)
-{
-	rcu_read_lock();
-	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
-		goto fail;
-
-	/*
-	 * The code that increments the pm_only counter must ensure that the
-	 * counter is globally visible before the queue is unfrozen.
-	 */
-	if (blk_queue_pm_only(q) &&
-	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
-		goto fail_put;
-
-	rcu_read_unlock();
-	return true;
-
-fail_put:
-	blk_queue_exit(q);
-fail:
-	rcu_read_unlock();
-	return false;
-}
-
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
@@ -442,10 +418,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 	return 0;
 }
 
-static inline int bio_queue_enter(struct bio *bio)
+int __bio_queue_enter(struct request_queue *q, struct bio *bio)
 {
-	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
 	while (!blk_try_enter_queue(q, false)) {
 		struct gendisk *disk = bio->bi_bdev->bd_disk;
 
--- a/block/blk.h
+++ b/block/blk.h
@@ -55,6 +55,40 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
 void blk_freeze_queue(struct request_queue *q);
 void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
 void blk_queue_start_drain(struct request_queue *q);
+int __bio_queue_enter(struct request_queue *q, struct bio *bio);
+
+static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
+{
+	rcu_read_lock();
+	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
+		goto fail;
+
+	/*
+	 * The code that increments the pm_only counter must ensure that the
+	 * counter is globally visible before the queue is unfrozen.
+	 */
+	if (blk_queue_pm_only(q) &&
+	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
+		goto fail_put;
+
+	rcu_read_unlock();
+	return true;
+
+fail_put:
+	blk_queue_exit(q);
+fail:
+	rcu_read_unlock();
+	return false;
+}
+
+static inline int bio_queue_enter(struct bio *bio)
+{
+	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+
+	if (blk_try_enter_queue(q, false))
+		return 0;
+	return __bio_queue_enter(q, bio);
+}
 
 #define BIO_INLINE_VECS 4
 struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
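With this split, submission paths that include blk.h get the cheap
percpu_ref check inlined and only call into blk-core.c when that
fast-path check fails (queue frozen/draining or pm_only set). A hedged
sketch of a caller is shown below; the function name example_submit()
is hypothetical and is not part of this patch.

/* Hypothetical caller, for illustration only; error handling elided. */
static void example_submit(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (bio_queue_enter(bio))
		return;			/* could not get a queue reference */

	/* ... hand the bio to the driver ... */

	blk_queue_exit(q);		/* drop the reference taken above */
}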