block: replace blk_queue_nowait with bdev_nowait
Replace blk_queue_nowait with a bdev_nowait helper that takes a block_device, given that the I/O submission path should not have to look into the request_queue.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Pankaj Raghav <p.raghav@samsung.com>
Link: https://lore.kernel.org/r/20220927075815.269694-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 568ec936bf
parent 99e6038743
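For illustration only, not part of the commit: a minimal sketch of what the call-site change looks like for a consumer of the API. The function my_bio_nowait_ok() and its use of bio->bi_bdev are hypothetical; bdev_nowait() is the inline helper added later in this diff, and the old spelling is shown in a comment.

/*
 * Hypothetical caller showing the before/after shape of the check.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>

static bool my_bio_nowait_ok(struct bio *bio)
{
	/* old spelling: blk_queue_nowait(bdev_get_queue(bio->bi_bdev)) */
	if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bio->bi_bdev))
		return false;	/* caller would fail the bio with BLK_STS_NOTSUPP */
	return true;
}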
@@ -713,7 +713,7 @@ void submit_bio_noacct(struct bio *bio)
 	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
 	 * if queue does not support NOWAIT.
 	 */
-	if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
+	if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
 		goto not_supported;
 
 	if (should_fail_bio(bio))
@@ -1856,9 +1856,7 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t)
 static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
 				     sector_t start, sector_t len, void *data)
 {
-	struct request_queue *q = bdev_get_queue(dev->bdev);
-
-	return !blk_queue_nowait(q);
+	return !bdev_nowait(dev->bdev);
 }
 
 static bool dm_table_supports_nowait(struct dm_table *t)
@@ -5844,7 +5844,7 @@ int md_run(struct mddev *mddev)
 			}
 		}
 		sysfs_notify_dirent_safe(rdev->sysfs_state);
-		nowait = nowait && blk_queue_nowait(bdev_get_queue(rdev->bdev));
+		nowait = nowait && bdev_nowait(rdev->bdev);
 	}
 
 	if (!bioset_initialized(&mddev->bio_set)) {
@@ -6980,7 +6980,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
 	 * If the new disk does not support REQ_NOWAIT,
 	 * disable on the whole MD.
 	 */
-	if (!blk_queue_nowait(bdev_get_queue(rdev->bdev))) {
+	if (!bdev_nowait(rdev->bdev)) {
 		pr_info("%s: Disabling nowait because %pg does not support nowait\n",
 			mdname(mddev), rdev->bdev);
 		blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue);
@@ -618,7 +618,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 #define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
 #define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
 #define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
-#define blk_queue_nowait(q)	test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)
 #define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
 
 extern void blk_set_pm_only(struct request_queue *q);
@@ -1280,6 +1279,11 @@ static inline bool bdev_fua(struct block_device *bdev)
 	return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
 }
 
+static inline bool bdev_nowait(struct block_device *bdev)
+{
+	return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
+}
+
 static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
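The md and dm hunks above both follow the same stacking-driver pattern: a composite device can keep QUEUE_FLAG_NOWAIT set only while every member block device reports bdev_nowait(). A hedged sketch of that pattern, not from the commit, follows; struct my_member and my_update_nowait() are made up for illustration, while blk_queue_flag_set()/blk_queue_flag_clear() are the existing queue-flag helpers.

/*
 * Hypothetical stacking driver: advertise nowait support only if
 * every member device supports it, mirroring the md/dm changes.
 */
#include <linux/blkdev.h>

struct my_member {
	struct block_device *bdev;
	struct my_member *next;
};

static void my_update_nowait(struct request_queue *q, struct my_member *members)
{
	struct my_member *m;

	blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
	for (m = members; m; m = m->next) {
		if (!bdev_nowait(m->bdev)) {
			blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);
			break;
		}
	}
}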
@@ -1377,7 +1377,7 @@ static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
 
 static bool io_bdev_nowait(struct block_device *bdev)
 {
-	return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
+	return !bdev || bdev_nowait(bdev);
 }
 
 /*