for-5.18/write-streams-2022-03-18
Merge tag 'for-5.18/write-streams-2022-03-18' of git://git.kernel.dk/linux-block

Pull NVMe write streams removal from Jens Axboe:
 "This removes the write streams support in NVMe. No vendor ever really
  shipped working support for this, and they are not interested in
  supporting it.

  With the NVMe support gone, we have nothing in the tree that supports
  this. Remove passing around of the hints.

  The only discussion point in this patchset imho is the fact that the
  file specific write hint setting/getting fcntl helpers will now return
  -1/EINVAL like they did before we supported write hints. No known
  applications use these functions, I only know of one prototype that I
  helped do for RocksDB, and that's not used. That said, with a change
  like this, it's always a bit controversial. Alternatively, we could
  just make them return 0 and pretend it worked. It's placement based
  hints after all."

* tag 'for-5.18/write-streams-2022-03-18' of git://git.kernel.dk/linux-block:
  fs: remove fs.f_write_hint
  fs: remove kiocb.ki_hint
  block: remove the per-bio/request write hint
  nvme: remove support or stream based temperature hint
commit 561593a048
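The following is a minimal userspace sketch (not part of the series) of the behaviour change described in the pull message above. It assumes a Linux host; the fcntl constants are copied from include/uapi/linux/fcntl.h behind fallback defines in case the libc headers do not expose them, and "testfile" is a placeholder path.

/*
 * Sketch of the user-visible effect: after this series the per-file hint
 * commands (F_SET_FILE_RW_HINT / F_GET_FILE_RW_HINT) fall through to
 * fcntl()'s default case and fail with -1/EINVAL, while the per-inode
 * F_SET_RW_HINT / F_GET_RW_HINT still succeed (the hint stays on the
 * inode, it just no longer reaches the device). Error handling is minimal.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#ifndef F_SET_RW_HINT			/* values from include/uapi/linux/fcntl.h */
#define F_LINUX_SPECIFIC_BASE	1024
#define F_SET_RW_HINT		(F_LINUX_SPECIFIC_BASE + 12)
#define F_SET_FILE_RW_HINT	(F_LINUX_SPECIFIC_BASE + 14)
#endif
#ifndef RWH_WRITE_LIFE_SHORT
#define RWH_WRITE_LIFE_SHORT	2
#endif

int main(void)
{
	uint64_t hint = RWH_WRITE_LIFE_SHORT;
	int fd = open("testfile", O_WRONLY | O_CREAT, 0644);

	if (fd < 0)
		return 1;

	/* Inode-level hint: still accepted after this series. */
	if (fcntl(fd, F_SET_RW_HINT, &hint) < 0)
		printf("F_SET_RW_HINT failed: %s\n", strerror(errno));

	/* File-level hint: rejected again, as before write hints existed. */
	if (fcntl(fd, F_SET_FILE_RW_HINT, &hint) < 0)
		printf("F_SET_FILE_RW_HINT failed: %s\n", strerror(errno)); /* EINVAL */

	return 0;
}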
@@ -257,7 +257,6 @@ void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
 	bio->bi_opf = opf;
 	bio->bi_flags = 0;
 	bio->bi_ioprio = 0;
-	bio->bi_write_hint = 0;
 	bio->bi_status = 0;
 	bio->bi_iter.bi_sector = 0;
 	bio->bi_iter.bi_size = 0;
@@ -737,7 +736,6 @@ static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
 	    bio_flagged(bio_src, BIO_REMAPPED))
 		bio_set_flag(bio, BIO_REMAPPED);
 	bio->bi_ioprio = bio_src->bi_ioprio;
-	bio->bi_write_hint = bio_src->bi_write_hint;
 	bio->bi_iter = bio_src->bi_iter;
 
 	bio_clone_blkg_association(bio, bio_src);
@@ -170,7 +170,6 @@ static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src)
 		bio_set_flag(bio, BIO_REMAPPED);
 	bio->bi_opf = bio_src->bi_opf;
 	bio->bi_ioprio = bio_src->bi_ioprio;
-	bio->bi_write_hint = bio_src->bi_write_hint;
 	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
 	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
 
@@ -754,13 +754,6 @@ static struct request *attempt_merge(struct request_queue *q,
 	if (rq_data_dir(req) != rq_data_dir(next))
 		return NULL;
 
-	/*
-	 * Don't allow merge of different write hints, or for a hint with
-	 * non-hint IO.
-	 */
-	if (req->write_hint != next->write_hint)
-		return NULL;
-
 	if (req->ioprio != next->ioprio)
 		return NULL;
 
@@ -886,13 +879,6 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	if (!bio_crypt_rq_ctx_compatible(rq, bio))
 		return false;
 
-	/*
-	 * Don't allow merge of different write hints, or for a hint with
-	 * non-hint IO.
-	 */
-	if (rq->write_hint != bio->bi_write_hint)
-		return false;
-
 	if (rq->ioprio != bio_prio(bio))
 		return false;
 
@@ -183,35 +183,11 @@ inval:
 	return count;
 }
 
-static int queue_write_hint_show(void *data, struct seq_file *m)
-{
-	struct request_queue *q = data;
-	int i;
-
-	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
-		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);
-
-	return 0;
-}
-
-static ssize_t queue_write_hint_store(void *data, const char __user *buf,
-				      size_t count, loff_t *ppos)
-{
-	struct request_queue *q = data;
-	int i;
-
-	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
-		q->write_hints[i] = 0;
-
-	return count;
-}
-
 static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
 	{ "poll_stat", 0400, queue_poll_stat_show },
 	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
 	{ "pm_only", 0600, queue_pm_only_show, NULL },
 	{ "state", 0600, queue_state_show, queue_state_write },
-	{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
 	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
 	{ },
 };
@@ -2412,7 +2412,6 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
 		rq->cmd_flags |= REQ_FAILFAST_MASK;
 
 	rq->__sector = bio->bi_iter.bi_sector;
-	rq->write_hint = bio->bi_write_hint;
 	blk_rq_bio_prep(rq, bio, nr_segs);
 
 	/* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
@@ -169,7 +169,6 @@ static struct bio *bounce_clone_bio(struct bio *bio_src)
 	if (bio_flagged(bio_src, BIO_REMAPPED))
 		bio_set_flag(bio, BIO_REMAPPED);
 	bio->bi_ioprio = bio_src->bi_ioprio;
-	bio->bi_write_hint = bio_src->bi_write_hint;
 	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
 	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
 
@@ -83,7 +83,6 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
 		bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
 	}
 	bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
-	bio.bi_write_hint = iocb->ki_hint;
 	bio.bi_private = current;
 	bio.bi_end_io = blkdev_bio_end_io_simple;
 	bio.bi_ioprio = iocb->ki_ioprio;
@@ -225,7 +224,6 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 
 	for (;;) {
 		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
-		bio->bi_write_hint = iocb->ki_hint;
 		bio->bi_private = dio;
 		bio->bi_end_io = blkdev_bio_end_io;
 		bio->bi_ioprio = iocb->ki_ioprio;
@@ -327,7 +325,6 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
 	dio->flags = 0;
 	dio->iocb = iocb;
 	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
-	bio->bi_write_hint = iocb->ki_hint;
 	bio->bi_end_io = blkdev_bio_end_io_async;
 	bio->bi_ioprio = iocb->ki_ioprio;
 
@@ -1136,8 +1136,6 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
 		goto skip_copy;
 	}
 
-	behind_bio->bi_write_hint = bio->bi_write_hint;
-
 	while (i < vcnt && size) {
 		struct page *page;
 		int len = min_t(int, PAGE_SIZE, size);
@@ -466,7 +466,6 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
 	bio->bi_end_io = ppl_log_endio;
 	bio->bi_iter.bi_sector = log->next_io_sector;
 	bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
-	bio->bi_write_hint = ppl_conf->write_hint;
 
 	pr_debug("%s: log->current_io_sector: %llu\n", __func__,
 	    (unsigned long long)log->next_io_sector);
@@ -496,7 +495,6 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
 			bio = bio_alloc_bioset(prev->bi_bdev, BIO_MAX_VECS,
 					       prev->bi_opf, GFP_NOIO,
 					       &ppl_conf->bs);
-			bio->bi_write_hint = prev->bi_write_hint;
 			bio->bi_iter.bi_sector = bio_end_sector(prev);
 			bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);
 
@@ -1396,7 +1394,6 @@ int ppl_init_log(struct r5conf *conf)
 	atomic64_set(&ppl_conf->seq, 0);
 	INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
 	spin_lock_init(&ppl_conf->no_mem_stripes_lock);
-	ppl_conf->write_hint = RWH_WRITE_LIFE_NOT_SET;
 
 	if (!mddev->external) {
 		ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
@@ -1495,25 +1492,13 @@ int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add)
 static ssize_t
 ppl_write_hint_show(struct mddev *mddev, char *buf)
 {
-	size_t ret = 0;
-	struct r5conf *conf;
-	struct ppl_conf *ppl_conf = NULL;
-
-	spin_lock(&mddev->lock);
-	conf = mddev->private;
-	if (conf && raid5_has_ppl(conf))
-		ppl_conf = conf->log_private;
-	ret = sprintf(buf, "%d\n", ppl_conf ? ppl_conf->write_hint : 0);
-	spin_unlock(&mddev->lock);
-
-	return ret;
+	return sprintf(buf, "%d\n", 0);
 }
 
 static ssize_t
 ppl_write_hint_store(struct mddev *mddev, const char *page, size_t len)
 {
 	struct r5conf *conf;
-	struct ppl_conf *ppl_conf;
 	int err = 0;
 	unsigned short new;
 
@@ -1527,17 +1512,10 @@ ppl_write_hint_store(struct mddev *mddev, const char *page, size_t len)
 		return err;
 
 	conf = mddev->private;
-	if (!conf) {
+	if (!conf)
 		err = -ENODEV;
-	} else if (raid5_has_ppl(conf)) {
-		ppl_conf = conf->log_private;
-		if (!ppl_conf)
-			err = -EINVAL;
-		else
-			ppl_conf->write_hint = new;
-	} else {
+	else if (!raid5_has_ppl(conf) || !conf->log_private)
 		err = -EINVAL;
-	}
 
 	mddev_unlock(mddev);
 
@@ -1211,9 +1211,6 @@ again:
 			bi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf);
 			bi->bi_io_vec[0].bv_offset = sh->dev[i].offset;
 			bi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf);
-			bi->bi_write_hint = sh->dev[i].write_hint;
-			if (!rrdev)
-				sh->dev[i].write_hint = RWH_WRITE_LIFE_NOT_SET;
 			/*
 			 * If this is discard request, set bi_vcnt 0. We don't
 			 * want to confuse SCSI because SCSI will replace payload
@@ -1264,8 +1261,6 @@ again:
 			rbi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf);
 			rbi->bi_io_vec[0].bv_offset = sh->dev[i].offset;
 			rbi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf);
-			rbi->bi_write_hint = sh->dev[i].write_hint;
-			sh->dev[i].write_hint = RWH_WRITE_LIFE_NOT_SET;
 			/*
 			 * If this is discard request, set bi_vcnt 0. We don't
 			 * want to confuse SCSI because SCSI will replace payload
@@ -3407,7 +3402,6 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
 		 (unsigned long long)sh->sector);
 
 	spin_lock_irq(&sh->stripe_lock);
-	sh->dev[dd_idx].write_hint = bi->bi_write_hint;
 	/* Don't allow new IO added to stripes in batch list */
 	if (sh->batch_head)
 		goto overlap;
@@ -77,10 +77,6 @@ module_param(apst_secondary_latency_tol_us, ulong, 0644);
 MODULE_PARM_DESC(apst_secondary_latency_tol_us,
 	"secondary APST latency tolerance in us");
 
-static bool streams;
-module_param(streams, bool, 0644);
-MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
-
 /*
  * nvme_wq - hosts nvme related works that are not reset or delete
  * nvme_reset_wq - hosts nvme reset works
@@ -720,108 +716,6 @@ bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
 }
 EXPORT_SYMBOL_GPL(__nvme_check_ready);
 
-static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
-{
-	struct nvme_command c = { };
-
-	c.directive.opcode = nvme_admin_directive_send;
-	c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
-	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
-	c.directive.dtype = NVME_DIR_IDENTIFY;
-	c.directive.tdtype = NVME_DIR_STREAMS;
-	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;
-
-	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
-}
-
-static int nvme_disable_streams(struct nvme_ctrl *ctrl)
-{
-	return nvme_toggle_streams(ctrl, false);
-}
-
-static int nvme_enable_streams(struct nvme_ctrl *ctrl)
-{
-	return nvme_toggle_streams(ctrl, true);
-}
-
-static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
-				  struct streams_directive_params *s, u32 nsid)
-{
-	struct nvme_command c = { };
-
-	memset(s, 0, sizeof(*s));
-
-	c.directive.opcode = nvme_admin_directive_recv;
-	c.directive.nsid = cpu_to_le32(nsid);
-	c.directive.numd = cpu_to_le32(nvme_bytes_to_numd(sizeof(*s)));
-	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
-	c.directive.dtype = NVME_DIR_STREAMS;
-
-	return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
-}
-
-static int nvme_configure_directives(struct nvme_ctrl *ctrl)
-{
-	struct streams_directive_params s;
-	u16 nssa;
-	int ret;
-
-	if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
-		return 0;
-	if (!streams)
-		return 0;
-
-	ret = nvme_enable_streams(ctrl);
-	if (ret)
-		return ret;
-
-	ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
-	if (ret)
-		goto out_disable_stream;
-
-	nssa = le16_to_cpu(s.nssa);
-	if (nssa < BLK_MAX_WRITE_HINTS - 1) {
-		dev_info(ctrl->device, "too few streams (%u) available\n",
-			 nssa);
-		/* this condition is not an error: streams are optional */
-		ret = 0;
-		goto out_disable_stream;
-	}
-
-	ctrl->nr_streams = min_t(u16, nssa, BLK_MAX_WRITE_HINTS - 1);
-	dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
-	return 0;
-
-out_disable_stream:
-	nvme_disable_streams(ctrl);
-	return ret;
-}
-
-/*
- * Check if 'req' has a write hint associated with it. If it does, assign
- * a valid namespace stream to the write.
- */
-static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
-				     struct request *req, u16 *control,
-				     u32 *dsmgmt)
-{
-	enum rw_hint streamid = req->write_hint;
-
-	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
-		streamid = 0;
-	else {
-		streamid--;
-		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
-			return;
-
-		*control |= NVME_RW_DTYPE_STREAMS;
-		*dsmgmt |= streamid << 16;
-	}
-
-	if (streamid < ARRAY_SIZE(req->q->write_hints))
-		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
-}
-
 static inline void nvme_setup_flush(struct nvme_ns *ns,
 		struct nvme_command *cmnd)
 {
@@ -925,7 +819,6 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
 		struct request *req, struct nvme_command *cmnd,
 		enum nvme_opcode op)
 {
-	struct nvme_ctrl *ctrl = ns->ctrl;
 	u16 control = 0;
 	u32 dsmgmt = 0;
 
@@ -948,9 +841,6 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
 	cmnd->rw.apptag = 0;
 	cmnd->rw.appmask = 0;
 
-	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
-		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);
-
 	if (ns->ms) {
 		/*
 		 * If formated with metadata, the block layer always provides a
@@ -1673,9 +1563,6 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
 		return;
 	}
 
-	if (ctrl->nr_streams && ns->sws && ns->sgs)
-		size *= ns->sws * ns->sgs;
-
 	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
 			NVME_DSM_MAX_RANGES);
 
@@ -1701,31 +1588,6 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
 		a->csi == b->csi;
 }
 
-static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
-				 u32 *phys_bs, u32 *io_opt)
-{
-	struct streams_directive_params s;
-	int ret;
-
-	if (!ctrl->nr_streams)
-		return 0;
-
-	ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
-	if (ret)
-		return ret;
-
-	ns->sws = le32_to_cpu(s.sws);
-	ns->sgs = le16_to_cpu(s.sgs);
-
-	if (ns->sws) {
-		*phys_bs = ns->sws * (1 << ns->lba_shift);
-		if (ns->sgs)
-			*io_opt = *phys_bs * ns->sgs;
-	}
-
-	return 0;
-}
-
 static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
 {
 	struct nvme_ctrl *ctrl = ns->ctrl;
@@ -1817,7 +1679,6 @@ static void nvme_update_disk_info(struct gendisk *disk,
 		blk_integrity_unregister(disk);
 
 	atomic_bs = phys_bs = bs;
-	nvme_setup_streams_ns(ns->ctrl, ns, &phys_bs, &io_opt);
 	if (id->nabo == 0) {
 		/*
 		 * Bit 1 indicates whether NAWUPF is defined for this namespace
@@ -3107,10 +2968,6 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl)
 	if (ret < 0)
 		return ret;
 
-	ret = nvme_configure_directives(ctrl);
-	if (ret < 0)
-		return ret;
-
 	ret = nvme_configure_acre(ctrl);
 	if (ret < 0)
 		return ret;
@@ -280,7 +280,6 @@ struct nvme_ctrl {
 	u16 crdt[3];
 	u16 oncs;
 	u16 oacs;
-	u16 nr_streams;
 	u16 sqsize;
 	u32 max_namespaces;
 	atomic_t abort_limit;
fs/aio.c
@@ -1478,7 +1478,6 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
 	req->ki_flags = iocb_flags(req->ki_filp);
 	if (iocb->aio_flags & IOCB_FLAG_RESFD)
 		req->ki_flags |= IOCB_EVENTFD;
-	req->ki_hint = ki_hint_validate(file_write_hint(req->ki_filp));
 	if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
 		/*
 		 * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then
@@ -3329,7 +3329,6 @@ static int alloc_new_bio(struct btrfs_inode *inode,
 	bio_ctrl->bio_flags = bio_flags;
 	bio->bi_end_io = end_io_func;
 	bio->bi_private = &inode->io_tree;
-	bio->bi_write_hint = inode->vfs_inode.i_write_hint;
 	bio->bi_opf = opf;
 	ret = calc_bio_boundaries(bio_ctrl, inode, file_offset);
 	if (ret < 0)
fs/buffer.c
@@ -53,7 +53,7 @@
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
-			 enum rw_hint hint, struct writeback_control *wbc);
+			 struct writeback_control *wbc);
 
 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
 
@@ -1804,8 +1804,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
 	do {
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_async_write(bh)) {
-			submit_bh_wbc(REQ_OP_WRITE, write_flags, bh,
-					inode->i_write_hint, wbc);
+			submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, wbc);
 			nr_underway++;
 		}
 		bh = next;
@@ -1859,8 +1858,7 @@ recover:
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_async_write(bh)) {
 			clear_buffer_dirty(bh);
-			submit_bh_wbc(REQ_OP_WRITE, write_flags, bh,
-					inode->i_write_hint, wbc);
+			submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, wbc);
 			nr_underway++;
 		}
 		bh = next;
@@ -3004,7 +3002,7 @@ static void end_bio_bh_io_sync(struct bio *bio)
 }
 
 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
-			 enum rw_hint write_hint, struct writeback_control *wbc)
+			 struct writeback_control *wbc)
 {
 	struct bio *bio;
 
@@ -3030,7 +3028,6 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
 	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
 
 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
-	bio->bi_write_hint = write_hint;
 
 	bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
 	BUG_ON(bio->bi_iter.bi_size != bh->b_size);
@@ -3052,7 +3049,7 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
 
 int submit_bh(int op, int op_flags, struct buffer_head *bh)
 {
-	return submit_bh_wbc(op, op_flags, bh, 0, NULL);
+	return submit_bh_wbc(op, op_flags, bh, NULL);
 }
 EXPORT_SYMBOL(submit_bh);
 
@@ -138,7 +138,6 @@ static int cachefiles_read(struct netfs_cache_resources *cres,
 	ki->iocb.ki_filp = file;
 	ki->iocb.ki_pos = start_pos + skipped;
 	ki->iocb.ki_flags = IOCB_DIRECT;
-	ki->iocb.ki_hint = ki_hint_validate(file_write_hint(file));
 	ki->iocb.ki_ioprio = get_current_ioprio();
 	ki->skipped = skipped;
 	ki->object = object;
@@ -313,7 +312,6 @@ static int cachefiles_write(struct netfs_cache_resources *cres,
 	ki->iocb.ki_filp = file;
 	ki->iocb.ki_pos = start_pos;
 	ki->iocb.ki_flags = IOCB_DIRECT | IOCB_WRITE;
-	ki->iocb.ki_hint = ki_hint_validate(file_write_hint(file));
 	ki->iocb.ki_ioprio = get_current_ioprio();
 	ki->object = object;
 	ki->inval_counter = cres->inval_counter;
@@ -402,9 +402,6 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
 		bio->bi_end_io = dio_bio_end_aio;
 	else
 		bio->bi_end_io = dio_bio_end_io;
-
-	bio->bi_write_hint = dio->iocb->ki_hint;
-
 	sdio->bio = bio;
 	sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
 }
@@ -373,7 +373,6 @@ void ext4_io_submit(struct ext4_io_submit *io)
 	if (bio) {
 		if (io->io_wbc->sync_mode == WB_SYNC_ALL)
 			io->io_bio->bi_opf |= REQ_SYNC;
-		io->io_bio->bi_write_hint = io->io_end->inode->i_write_hint;
 		submit_bio(io->io_bio);
 	}
 	io->io_bio = NULL;
@@ -418,10 +417,8 @@ static void io_submit_add_bh(struct ext4_io_submit *io,
 submit_and_retry:
 		ext4_io_submit(io);
 	}
-	if (io->io_bio == NULL) {
+	if (io->io_bio == NULL)
 		io_submit_init_bio(io, bh);
-		io->io_bio->bi_write_hint = inode->i_write_hint;
-	}
 	ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
 	if (ret != bh->b_size)
 		goto submit_and_retry;
@@ -428,8 +428,6 @@ static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
 	} else {
 		bio->bi_end_io = f2fs_write_end_io;
 		bio->bi_private = sbi;
-		bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
-						fio->type, fio->temp);
 	}
 	iostat_alloc_and_bind_ctx(sbi, bio, NULL);
 
@@ -4482,10 +4482,8 @@ static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from,
 	struct f2fs_inode_info *fi = F2FS_I(inode);
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	const bool do_opu = f2fs_lfs_mode(sbi);
-	const int whint_mode = F2FS_OPTION(sbi).whint_mode;
 	const loff_t pos = iocb->ki_pos;
 	const ssize_t count = iov_iter_count(from);
-	const enum rw_hint hint = iocb->ki_hint;
 	unsigned int dio_flags;
 	struct iomap_dio *dio;
 	ssize_t ret;
@@ -4518,8 +4516,6 @@ static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from,
 		if (do_opu)
 			f2fs_down_read(&fi->i_gc_rwsem[READ]);
 	}
-	if (whint_mode == WHINT_MODE_OFF)
-		iocb->ki_hint = WRITE_LIFE_NOT_SET;
 
 	/*
 	 * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
@@ -4542,8 +4538,6 @@ static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from,
 		ret = iomap_dio_complete(dio);
 	}
 
-	if (whint_mode == WHINT_MODE_OFF)
-		iocb->ki_hint = hint;
 	if (do_opu)
 		f2fs_up_read(&fi->i_gc_rwsem[READ]);
 	f2fs_up_read(&fi->i_gc_rwsem[WRITE]);
fs/fcntl.c
@@ -291,22 +291,6 @@ static long fcntl_rw_hint(struct file *file, unsigned int cmd,
 	u64 h;
 
 	switch (cmd) {
-	case F_GET_FILE_RW_HINT:
-		h = file_write_hint(file);
-		if (copy_to_user(argp, &h, sizeof(*argp)))
-			return -EFAULT;
-		return 0;
-	case F_SET_FILE_RW_HINT:
-		if (copy_from_user(&h, argp, sizeof(h)))
-			return -EFAULT;
-		hint = (enum rw_hint) h;
-		if (!rw_hint_valid(hint))
-			return -EINVAL;
-
-		spin_lock(&file->f_lock);
-		file->f_write_hint = hint;
-		spin_unlock(&file->f_lock);
-		return 0;
 	case F_GET_RW_HINT:
 		h = inode->i_write_hint;
 		if (copy_to_user(argp, &h, sizeof(*argp)))
@@ -431,8 +415,6 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
 		break;
 	case F_GET_RW_HINT:
 	case F_SET_RW_HINT:
-	case F_GET_FILE_RW_HINT:
-	case F_SET_FILE_RW_HINT:
 		err = fcntl_rw_hint(filp, cmd, arg);
 		break;
 	default:
@@ -491,7 +491,6 @@ static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
 	new = bio_alloc(prev->bi_bdev, nr_iovecs, prev->bi_opf, GFP_NOIO);
 	bio_clone_blkg_association(new, prev);
 	new->bi_iter.bi_sector = bio_end_sector(prev);
-	new->bi_write_hint = prev->bi_write_hint;
 	bio_chain(new, prev);
 	submit_bio(prev);
 	return new;
@@ -3941,7 +3941,6 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
 		return -EBADF;
-	req->rw.kiocb.ki_hint = ki_hint_validate(file_write_hint(req->file));
 	return io_prep_rw(req, sqe);
 }
 
@@ -1222,7 +1222,6 @@ iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
 			       REQ_OP_WRITE | wbc_to_write_flags(wbc),
 			       GFP_NOFS, &iomap_ioend_bioset);
 	bio->bi_iter.bi_sector = sector;
-	bio->bi_write_hint = inode->i_write_hint;
 	wbc_init_bio(wbc, bio);
 
 	ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
@@ -1253,7 +1252,6 @@ iomap_chain_bio(struct bio *prev)
 	new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS);
 	bio_clone_blkg_association(new, prev);
 	new->bi_iter.bi_sector = bio_end_sector(prev);
-	new->bi_write_hint = prev->bi_write_hint;
 
 	bio_chain(prev, new);
 	bio_get(prev); /* for iomap_finish_ioend */
@@ -315,7 +315,6 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
 		fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
 					  GFP_KERNEL);
 		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
-		bio->bi_write_hint = dio->iocb->ki_hint;
 		bio->bi_ioprio = dio->iocb->ki_ioprio;
 		bio->bi_private = dio;
 		bio->bi_end_io = iomap_dio_bio_end_io;
@@ -588,7 +588,6 @@ alloc_new:
 				GFP_NOFS);
 		bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
 		wbc_init_bio(wbc, bio);
-		bio->bi_write_hint = inode->i_write_hint;
 	}
 
 	/*
@@ -835,7 +835,6 @@ static int do_dentry_open(struct file *f,
 	    likely(f->f_op->write || f->f_op->write_iter))
 		f->f_mode |= FMODE_CAN_WRITE;
 
-	f->f_write_hint = WRITE_LIFE_NOT_SET;
 	f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
 
 	file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping);
@@ -695,7 +695,6 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
 	bio = bio_alloc(bdev, nr_pages,
 			REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE, GFP_NOFS);
 	bio->bi_iter.bi_sector = zi->i_zsector;
-	bio->bi_write_hint = iocb->ki_hint;
 	bio->bi_ioprio = iocb->ki_ioprio;
 	if (iocb->ki_flags & IOCB_DSYNC)
 		bio->bi_opf |= REQ_FUA;
@@ -250,7 +250,6 @@ struct bio {
 	 */
 	unsigned short		bi_flags;	/* BIO_* below */
 	unsigned short		bi_ioprio;
-	unsigned short		bi_write_hint;
 	blk_status_t		bi_status;
 	atomic_t		__bi_remaining;
 
@@ -517,9 +517,6 @@ struct request_queue {
 
 	bool			mq_sysfs_init_done;
 
-#define BLK_MAX_WRITE_HINTS	5
-	u64			write_hints[BLK_MAX_WRITE_HINTS];
-
 	/*
 	 * Independent sector access ranges. This is always NULL for
 	 * devices that do not have multiple independent access ranges.
@@ -328,7 +328,6 @@ struct kiocb {
 	void (*ki_complete)(struct kiocb *iocb, long ret);
 	void			*private;
 	int			ki_flags;
-	u16			ki_hint;
 	u16			ki_ioprio; /* See linux/ioprio.h */
 	struct wait_page_queue	*ki_waitq; /* for async buffered IO */
 	randomized_struct_fields_end
@@ -973,7 +972,6 @@ struct file {
 	 * Must not be taken from IRQ context.
 	 */
 	spinlock_t		f_lock;
-	enum rw_hint		f_write_hint;
 	atomic_long_t		f_count;
 	unsigned int		f_flags;
 	fmode_t			f_mode;
@@ -2222,31 +2220,13 @@ static inline bool HAS_UNMAPPED_ID(struct user_namespace *mnt_userns,
 	       !gid_valid(i_gid_into_mnt(mnt_userns, inode));
 }
 
-static inline enum rw_hint file_write_hint(struct file *file)
-{
-	if (file->f_write_hint != WRITE_LIFE_NOT_SET)
-		return file->f_write_hint;
-
-	return file_inode(file)->i_write_hint;
-}
-
 static inline int iocb_flags(struct file *file);
 
-static inline u16 ki_hint_validate(enum rw_hint hint)
-{
-	typeof(((struct kiocb *)0)->ki_hint) max_hint = -1;
-
-	if (hint <= max_hint)
-		return hint;
-	return 0;
-}
-
 static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
 {
 	*kiocb = (struct kiocb) {
 		.ki_filp = filp,
 		.ki_flags = iocb_flags(filp),
-		.ki_hint = ki_hint_validate(file_write_hint(filp)),
 		.ki_ioprio = get_current_ioprio(),
 	};
 }
@@ -2257,7 +2237,6 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
 	*kiocb = (struct kiocb) {
 		.ki_filp = filp,
 		.ki_flags = kiocb_src->ki_flags,
-		.ki_hint = kiocb_src->ki_hint,
 		.ki_ioprio = kiocb_src->ki_ioprio,
 		.ki_pos = kiocb_src->ki_pos,
 	};
@@ -956,12 +956,11 @@ TRACE_EVENT(f2fs_direct_IO_enter,
 		__entry->rw	= rw;
 	),
 
-	TP_printk("dev = (%d,%d), ino = %lu pos = %lld len = %lu ki_flags = %x ki_hint = %x ki_ioprio = %x rw = %d",
+	TP_printk("dev = (%d,%d), ino = %lu pos = %lld len = %lu ki_flags = %x ki_ioprio = %x rw = %d",
 		show_dev_ino(__entry),
 		__entry->iocb->ki_pos,
 		__entry->len,
 		__entry->iocb->ki_flags,
-		__entry->iocb->ki_hint,
 		__entry->iocb->ki_ioprio,
 		__entry->rw)
 );