block: convert the rest of block to bdev_get_queue
Convert bdev->bd_disk->queue to bdev_get_queue(), which uses a cached queue pointer and so is faster.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/addf6ea988c04213697ba3684c853e4ed7642a39.1634219547.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit ed6cddefdf
parent eab4e02733
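For context, bdev_get_queue() is a trivial inline helper in include/linux/blkdev.h. After the patch earlier in this series that caches the queue pointer in struct block_device, it resolves with a single load instead of chasing two dependent pointers through the gendisk, which is what makes the converted call sites faster. A minimal sketch of the before/after forms (illustrative, not the verbatim kernel source):

/* Old helper: two dependent pointer loads (bdev -> bd_disk -> queue). */
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

/* New helper: one load from the queue pointer cached in the bdev itself. */
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_queue;	/* never NULL while the bdev is alive */
}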
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -134,7 +134,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
 	iv = bip->bip_vec + bip->bip_vcnt;
 
 	if (bip->bip_vcnt &&
-	    bvec_gap_to_prev(bio->bi_bdev->bd_disk->queue,
+	    bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev),
 			&bip->bip_vec[bip->bip_vcnt - 1], offset))
 		return 0;
 
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -621,7 +621,7 @@ struct block_device *blkcg_conf_open_bdev(char **inputp)
  */
 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		   char *input, struct blkg_conf_ctx *ctx)
-	__acquires(rcu) __acquires(&bdev->bd_disk->queue->queue_lock)
+	__acquires(rcu) __acquires(&bdev->bd_queue->queue_lock)
 {
 	struct block_device *bdev;
 	struct request_queue *q;
@@ -632,7 +632,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 	if (IS_ERR(bdev))
 		return PTR_ERR(bdev);
 
-	q = bdev->bd_disk->queue;
+	q = bdev_get_queue(bdev);
 
 	rcu_read_lock();
 	spin_lock_irq(&q->queue_lock);
@@ -737,9 +737,9 @@ EXPORT_SYMBOL_GPL(blkg_conf_prep);
  * with blkg_conf_prep().
  */
 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
-	__releases(&ctx->bdev->bd_disk->queue->queue_lock) __releases(rcu)
+	__releases(&ctx->bdev->bd_queue->queue_lock) __releases(rcu)
 {
-	spin_unlock_irq(&ctx->bdev->bd_disk->queue->queue_lock);
+	spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock);
 	rcu_read_unlock();
 	blkdev_put_no_open(ctx->bdev);
 }
@@ -842,7 +842,7 @@ static void blkcg_fill_root_iostats(void)
 	while ((dev = class_dev_iter_next(&iter))) {
 		struct block_device *bdev = dev_to_bdev(dev);
 		struct blkcg_gq *blkg =
-			blk_queue_root_blkg(bdev->bd_disk->queue);
+			blk_queue_root_blkg(bdev_get_queue(bdev));
 		struct blkg_iostat tmp;
 		int cpu;
 
@@ -1801,7 +1801,7 @@ static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
 
 	rcu_read_lock();
 	blkg = blkg_lookup_create(css_to_blkcg(css),
-				  bio->bi_bdev->bd_disk->queue);
+				  bdev_get_queue(bio->bi_bdev));
 	while (blkg) {
 		if (blkg_tryget(blkg)) {
 			ret_blkg = blkg;
@@ -1837,8 +1837,8 @@ void bio_associate_blkg_from_css(struct bio *bio,
 	if (css && css->parent) {
 		bio->bi_blkg = blkg_tryget_closest(bio, css);
 	} else {
-		blkg_get(bio->bi_bdev->bd_disk->queue->root_blkg);
-		bio->bi_blkg = bio->bi_bdev->bd_disk->queue->root_blkg;
+		blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg);
+		bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg;
 	}
 }
 EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
--- a/block/blk-crypto.c
+++ b/block/blk-crypto.c
@@ -280,7 +280,7 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr)
 	 * Success if device supports the encryption context, or if we succeeded
 	 * in falling back to the crypto API.
 	 */
-	if (blk_ksm_crypto_cfg_supported(bio->bi_bdev->bd_disk->queue->ksm,
+	if (blk_ksm_crypto_cfg_supported(bdev_get_queue(bio->bi_bdev)->ksm,
 					 &bc_key->crypto_cfg))
 		return true;
 
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -3165,12 +3165,12 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
 	if (IS_ERR(bdev))
 		return PTR_ERR(bdev);
 
-	ioc = q_to_ioc(bdev->bd_disk->queue);
+	ioc = q_to_ioc(bdev_get_queue(bdev));
 	if (!ioc) {
-		ret = blk_iocost_init(bdev->bd_disk->queue);
+		ret = blk_iocost_init(bdev_get_queue(bdev));
 		if (ret)
 			goto err;
-		ioc = q_to_ioc(bdev->bd_disk->queue);
+		ioc = q_to_ioc(bdev_get_queue(bdev));
 	}
 
 	spin_lock_irq(&ioc->lock);
@@ -3332,12 +3332,12 @@ static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
 	if (IS_ERR(bdev))
 		return PTR_ERR(bdev);
 
-	ioc = q_to_ioc(bdev->bd_disk->queue);
+	ioc = q_to_ioc(bdev_get_queue(bdev));
 	if (!ioc) {
-		ret = blk_iocost_init(bdev->bd_disk->queue);
+		ret = blk_iocost_init(bdev_get_queue(bdev));
 		if (ret)
 			goto err;
-		ioc = q_to_ioc(bdev->bd_disk->queue);
+		ioc = q_to_ioc(bdev_get_queue(bdev));
 	}
 
 	spin_lock_irq(&ioc->lock);
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2253,7 +2253,7 @@ static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
  */
 void blk_mq_submit_bio(struct bio *bio)
 {
-	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
+	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 	const int is_sync = op_is_sync(bio->bi_opf);
 	const int is_flush_fua = op_is_flush(bio->bi_opf);
 	struct request *rq;
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -2063,7 +2063,7 @@ void blk_throtl_charge_bio_split(struct bio *bio)
 
 bool __blk_throtl_bio(struct bio *bio)
 {
-	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
+	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 	struct blkcg_gq *blkg = bio->bi_blkg;
 	struct throtl_qnode *qn = NULL;
 	struct throtl_grp *tg = blkg_to_tg(blkg);
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -883,7 +883,7 @@ ssize_t part_stat_show(struct device *dev,
 		       struct device_attribute *attr, char *buf)
 {
 	struct block_device *bdev = dev_to_bdev(dev);
-	struct request_queue *q = bdev->bd_disk->queue;
+	struct request_queue *q = bdev_get_queue(bdev);
 	struct disk_stats stat;
 	unsigned int inflight;
 
@@ -927,7 +927,7 @@ ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
 			   char *buf)
 {
 	struct block_device *bdev = dev_to_bdev(dev);
-	struct request_queue *q = bdev->bd_disk->queue;
+	struct request_queue *q = bdev_get_queue(bdev);
 	unsigned int inflight[2];
 
 	if (queue_is_mq(q))
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -204,7 +204,7 @@ static ssize_t part_alignment_offset_show(struct device *dev,
 	struct block_device *bdev = dev_to_bdev(dev);
 
 	return sprintf(buf, "%u\n",
-		queue_limit_alignment_offset(&bdev->bd_disk->queue->limits,
+		queue_limit_alignment_offset(&bdev_get_queue(bdev)->limits,
 			bdev->bd_start_sect));
 }
 
@@ -214,7 +214,7 @@ static ssize_t part_discard_alignment_show(struct device *dev,
 	struct block_device *bdev = dev_to_bdev(dev);
 
 	return sprintf(buf, "%u\n",
-		queue_limit_discard_alignment(&bdev->bd_disk->queue->limits,
+		queue_limit_discard_alignment(&bdev_get_queue(bdev)->limits,
 			bdev->bd_start_sect));
 }
 