From 882ec4e609c1a6de1836e765905f62de1502ae1e Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Mon, 14 Sep 2020 12:02:23 -0400 Subject: [PATCH 01/20] dm table: stack 'chunk_sectors' limit to account for target-specific splitting If target set ti->max_io_len it must be used when stacking DM device's queue_limits to establish a 'chunk_sectors' that is compatible with the IO stack. By using lcm_not_zero() care is taken to avoid blindly overriding the chunk_sectors limit stacked up by blk_stack_limits(). Depends-on: 07d098e6bbad ("block: allow 'chunk_sectors' to be non-power-of-2") Signed-off-by: Mike Snitzer --- drivers/md/dm-table.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index c3be7cb2570c..704345e95cc1 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -1506,6 +1507,10 @@ int dm_calculate_queue_limits(struct dm_table *table, zone_sectors = ti_limits.chunk_sectors; } + /* Stack chunk_sectors if target-specific splitting is required */ + if (ti->max_io_len) + ti_limits.chunk_sectors = lcm_not_zero(ti->max_io_len, + ti_limits.chunk_sectors); /* Set I/O hints portion of queue limits */ if (ti->type->io_hints) ti->type->io_hints(ti, &ti_limits); From 5091cdec56faeaefa79de4b6cb3c3c55e50d1ac3 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Fri, 18 Sep 2020 20:22:30 -0400 Subject: [PATCH 02/20] dm: change max_io_len() to use blk_max_size_offset() Using blk_max_size_offset() enables DM core's splitting to impose ti->max_io_len (via q->limits.chunk_sectors) and also fallback to respecting q->limits.max_sectors if chunk_sectors isn't set. Signed-off-by: Mike Snitzer --- drivers/md/dm.c | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 977a962fa0bb..82886b4edab8 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1051,22 +1051,18 @@ static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti static sector_t max_io_len(sector_t sector, struct dm_target *ti) { sector_t len = max_io_len_target_boundary(sector, ti); - sector_t offset, max_len; + sector_t max_len; /* * Does the target need to split even further? + * - q->limits.chunk_sectors reflects ti->max_io_len so + * blk_max_size_offset() provides required splitting. + * - blk_max_size_offset() also respects q->limits.max_sectors */ - if (ti->max_io_len) { - offset = dm_target_offset(ti, sector); - if (unlikely(ti->max_io_len & (ti->max_io_len - 1))) - max_len = sector_div(offset, ti->max_io_len); - else - max_len = offset & (ti->max_io_len - 1); - max_len = ti->max_io_len - max_len; - - if (len > max_len) - len = max_len; - } + max_len = blk_max_size_offset(dm_table_get_md(ti->table)->queue, + dm_target_offset(ti, sector)); + if (len > max_len) + len = max_len; return len; } From 094ee64d7de8ab72b495ff9c03d86a60272da56d Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Mon, 14 Sep 2020 13:50:49 -0400 Subject: [PATCH 03/20] dm: push md->immutable_target optimization down to __process_bio() Also, update associated stale comment in __bind(). 
Signed-off-by: Mike Snitzer --- drivers/md/dm.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 82886b4edab8..e1cb3b9fd207 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1680,7 +1680,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, * fact that targets that use it do _not_ have a need to split bios. */ static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map, - struct bio *bio, struct dm_target *ti) + struct bio *bio) { struct clone_info ci; blk_qc_t ret = BLK_QC_T_NONE; @@ -1705,6 +1705,12 @@ static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map, /* dec_pending submits any data associated with flush */ } else { struct dm_target_io *tio; + struct dm_target *ti = md->immutable_target; + + if (WARN_ON_ONCE(!ti)) { + error = -EIO; + goto out; + } ci.bio = bio; ci.sector_count = bio_sectors(bio); @@ -1724,21 +1730,12 @@ static blk_qc_t dm_process_bio(struct mapped_device *md, struct dm_table *map, struct bio *bio) { blk_qc_t ret = BLK_QC_T_NONE; - struct dm_target *ti = md->immutable_target; if (unlikely(!map)) { bio_io_error(bio); return ret; } - if (!ti) { - ti = dm_table_find_target(map, bio->bi_iter.bi_sector); - if (unlikely(!ti)) { - bio_io_error(bio); - return ret; - } - } - /* * If in ->submit_bio we need to use blk_queue_split(), otherwise * queue_limits for abnormal requests (e.g. discard, writesame, etc) @@ -1753,7 +1750,7 @@ static blk_qc_t dm_process_bio(struct mapped_device *md, } if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) - return __process_bio(md, map, bio, ti); + return __process_bio(md, map, bio); return __split_and_process_bio(md, map, bio); } @@ -2120,8 +2117,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, /* * Leverage the fact that request-based DM targets and * NVMe bio based targets are immutable singletons - * - used to optimize both dm_request_fn and dm_mq_queue_rq; - * and __process_bio. + * - used to optimize both __process_bio and dm_mq_queue_rq */ md->immutable_target = dm_table_get_immutable_target(t); } From 3720281db9ad4905c3afc1bf389314d64e145093 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Sat, 19 Sep 2020 13:12:48 -0400 Subject: [PATCH 04/20] dm: optimize max_io_len() by inlining max_io_len_target_boundary() Saves redundant dm_target_offset() math. Also, reverse argument order for max_io_len() to be consistent with other similar functions. Signed-off-by: Mike Snitzer --- drivers/md/dm.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index e1cb3b9fd207..0d3639414be7 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1041,16 +1041,16 @@ static void clone_endio(struct bio *bio) * Return maximum size of I/O possible at the supplied sector up to the current * target boundary. 
*/ -static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti) +static inline sector_t max_io_len_target_boundary(struct dm_target *ti, + sector_t target_offset) { - sector_t target_offset = dm_target_offset(ti, sector); - return ti->len - target_offset; } -static sector_t max_io_len(sector_t sector, struct dm_target *ti) +static sector_t max_io_len(struct dm_target *ti, sector_t sector) { - sector_t len = max_io_len_target_boundary(sector, ti); + sector_t target_offset = dm_target_offset(ti, sector); + sector_t len = max_io_len_target_boundary(ti, target_offset); sector_t max_len; /* @@ -1060,7 +1060,7 @@ static sector_t max_io_len(sector_t sector, struct dm_target *ti) * - blk_max_size_offset() also respects q->limits.max_sectors */ max_len = blk_max_size_offset(dm_table_get_md(ti->table)->queue, - dm_target_offset(ti, sector)); + target_offset); if (len > max_len) len = max_len; @@ -1115,7 +1115,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, goto out; if (!ti->type->direct_access) goto out; - len = max_io_len(sector, ti) / PAGE_SECTORS; + len = max_io_len(ti, sector) / PAGE_SECTORS; if (len < 1) goto out; nr_pages = min(len, nr_pages); @@ -1497,7 +1497,8 @@ static int __send_changing_extent_only(struct clone_info *ci, struct dm_target * if (!num_bios) return -EOPNOTSUPP; - len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); + len = min_t(sector_t, ci->sector_count, + max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector))); __send_duplicate_bios(ci, ti, num_bios, &len); @@ -1578,7 +1579,7 @@ static int __split_and_process_non_flush(struct clone_info *ci) if (__process_abnormal_io(ci, ti, &r)) return r; - len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); + len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count); r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); if (r < 0) From 828678b87eff06a4fff1e13e8b107287d522ed30 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Mon, 14 Sep 2020 13:59:53 -0400 Subject: [PATCH 05/20] dm: push use of on-stack flush_bio down to __send_empty_flush() Eliminates duplicate code, no functional change. Signed-off-by: Mike Snitzer --- drivers/md/dm.c | 37 +++++++++++++------------------------ 1 file changed, 13 insertions(+), 24 deletions(-) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 0d3639414be7..ea901cb09ea7 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1426,6 +1426,17 @@ static int __send_empty_flush(struct clone_info *ci) { unsigned target_nr = 0; struct dm_target *ti; + struct bio flush_bio; + + /* + * Use an on-stack bio for this, it's safe since we don't + * need to reference it after submit. It's just used as + * the basis for the clone(s). + */ + bio_init(&flush_bio, NULL, 0); + flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; + ci->bio = &flush_bio; + ci->sector_count = 0; /* * Empty flush uses a statically initialized bio, as the base for @@ -1439,6 +1450,8 @@ static int __send_empty_flush(struct clone_info *ci) BUG_ON(bio_has_data(ci->bio)); while ((ti = dm_table_get_target(ci->map, target_nr++))) __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); + + bio_uninit(ci->bio); return 0; } @@ -1615,19 +1628,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, init_clone_info(&ci, md, map, bio); if (bio->bi_opf & REQ_PREFLUSH) { - struct bio flush_bio; - - /* - * Use an on-stack bio for this, it's safe since we don't - * need to reference it after submit. 
It's just used as - * the basis for the clone(s). - */ - bio_init(&flush_bio, NULL, 0); - flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; - ci.bio = &flush_bio; - ci.sector_count = 0; error = __send_empty_flush(&ci); - bio_uninit(ci.bio); /* dec_pending submits any data associated with flush */ } else if (op_is_zone_mgmt(bio_op(bio))) { ci.bio = bio; @@ -1690,19 +1691,7 @@ static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map, init_clone_info(&ci, md, map, bio); if (bio->bi_opf & REQ_PREFLUSH) { - struct bio flush_bio; - - /* - * Use an on-stack bio for this, it's safe since we don't - * need to reference it after submit. It's just used as - * the basis for the clone(s). - */ - bio_init(&flush_bio, NULL, 0); - flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; - ci.bio = &flush_bio; - ci.sector_count = 0; error = __send_empty_flush(&ci); - bio_uninit(ci.bio); /* dec_pending submits any data associated with flush */ } else { struct dm_target_io *tio; From 9679b5a7ec400f18f1812339b59c94750db48a76 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Tue, 15 Sep 2020 21:56:29 -0400 Subject: [PATCH 06/20] dm: simplify __process_abnormal_io() Only call bio_op() once in switch statement. Also remove the excessive factoring out to one line functions. Signed-off-by: Mike Snitzer --- drivers/md/dm.c | 68 +++++++++++++------------------------------------ 1 file changed, 17 insertions(+), 51 deletions(-) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index ea901cb09ea7..f7184b3dca66 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1474,28 +1474,6 @@ static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, return 0; } -typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); - -static unsigned get_num_discard_bios(struct dm_target *ti) -{ - return ti->num_discard_bios; -} - -static unsigned get_num_secure_erase_bios(struct dm_target *ti) -{ - return ti->num_secure_erase_bios; -} - -static unsigned get_num_write_same_bios(struct dm_target *ti) -{ - return ti->num_write_same_bios; -} - -static unsigned get_num_write_zeroes_bios(struct dm_target *ti) -{ - return ti->num_write_zeroes_bios; -} - static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, unsigned num_bios) { @@ -1521,26 +1499,6 @@ static int __send_changing_extent_only(struct clone_info *ci, struct dm_target * return 0; } -static int __send_discard(struct clone_info *ci, struct dm_target *ti) -{ - return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti)); -} - -static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti) -{ - return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti)); -} - -static int __send_write_same(struct clone_info *ci, struct dm_target *ti) -{ - return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti)); -} - -static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti) -{ - return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti)); -} - static bool is_abnormal_io(struct bio *bio) { bool r = false; @@ -1561,18 +1519,26 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti, int *result) { struct bio *bio = ci->bio; + unsigned num_bios = 0; - if (bio_op(bio) == REQ_OP_DISCARD) - *result = __send_discard(ci, ti); - else if (bio_op(bio) == REQ_OP_SECURE_ERASE) - *result = __send_secure_erase(ci, ti); - else if (bio_op(bio) == REQ_OP_WRITE_SAME) - *result = __send_write_same(ci, ti); - else if (bio_op(bio) == 
REQ_OP_WRITE_ZEROES) - *result = __send_write_zeroes(ci, ti); - else + switch (bio_op(bio)) { + case REQ_OP_DISCARD: + num_bios = ti->num_discard_bios; + break; + case REQ_OP_SECURE_ERASE: + num_bios = ti->num_secure_erase_bios; + break; + case REQ_OP_WRITE_SAME: + num_bios = ti->num_write_same_bios; + break; + case REQ_OP_WRITE_ZEROES: + num_bios = ti->num_write_zeroes_bios; + break; + default: return false; + } + *result = __send_changing_extent_only(ci, ti, num_bios); return true; } From 7465d7ac50edb3158c5eb957c5ecd3a5310e1c68 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Thu, 17 Sep 2020 12:59:36 -0400 Subject: [PATCH 07/20] dm: eliminate need for start_io_acct() forward declaration Signed-off-by: Mike Snitzer --- drivers/md/dm.c | 78 ++++++++++++++++++++++++------------------------- 1 file changed, 38 insertions(+), 40 deletions(-) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index f7184b3dca66..b5b18bafd865 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -591,7 +591,44 @@ out: return r; } -static void start_io_acct(struct dm_io *io); +u64 dm_start_time_ns_from_clone(struct bio *bio) +{ + struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); + struct dm_io *io = tio->io; + + return jiffies_to_nsecs(io->start_time); +} +EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone); + +static void start_io_acct(struct dm_io *io) +{ + struct mapped_device *md = io->md; + struct bio *bio = io->orig_bio; + + io->start_time = bio_start_io_acct(bio); + if (unlikely(dm_stats_used(&md->stats))) + dm_stats_account_io(&md->stats, bio_data_dir(bio), + bio->bi_iter.bi_sector, bio_sectors(bio), + false, 0, &io->stats_aux); +} + +static void end_io_acct(struct dm_io *io) +{ + struct mapped_device *md = io->md; + struct bio *bio = io->orig_bio; + unsigned long duration = jiffies - io->start_time; + + bio_end_io_acct(bio, io->start_time); + + if (unlikely(dm_stats_used(&md->stats))) + dm_stats_account_io(&md->stats, bio_data_dir(bio), + bio->bi_iter.bi_sector, bio_sectors(bio), + true, duration, &io->stats_aux); + + /* nudge anyone waiting on suspend queue */ + if (unlikely(wq_has_sleeper(&md->wait))) + wake_up(&md->wait); +} static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) { @@ -657,45 +694,6 @@ static void free_tio(struct dm_target_io *tio) bio_put(&tio->clone); } -u64 dm_start_time_ns_from_clone(struct bio *bio) -{ - struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); - struct dm_io *io = tio->io; - - return jiffies_to_nsecs(io->start_time); -} -EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone); - -static void start_io_acct(struct dm_io *io) -{ - struct mapped_device *md = io->md; - struct bio *bio = io->orig_bio; - - io->start_time = bio_start_io_acct(bio); - if (unlikely(dm_stats_used(&md->stats))) - dm_stats_account_io(&md->stats, bio_data_dir(bio), - bio->bi_iter.bi_sector, bio_sectors(bio), - false, 0, &io->stats_aux); -} - -static void end_io_acct(struct dm_io *io) -{ - struct mapped_device *md = io->md; - struct bio *bio = io->orig_bio; - unsigned long duration = jiffies - io->start_time; - - bio_end_io_acct(bio, io->start_time); - - if (unlikely(dm_stats_used(&md->stats))) - dm_stats_account_io(&md->stats, bio_data_dir(bio), - bio->bi_iter.bi_sector, bio_sectors(bio), - true, duration, &io->stats_aux); - - /* nudge anyone waiting on suspend queue */ - if (unlikely(wq_has_sleeper(&md->wait))) - wake_up(&md->wait); -} - /* * Add the bio to the list of deferred io. 
*/ From 33bd6f0693857492ab19869d79801437ac1e42ba Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Sat, 19 Sep 2020 13:09:11 -0400 Subject: [PATCH 08/20] dm table: make 'struct dm_table' definition accessible to all of DM core Move 'struct dm_table' definition from dm-table.c to dm-core.h and update DM core to access its members directly. Helps optimize max_io_len() and other methods slightly. Signed-off-by: Mike Snitzer --- drivers/md/dm-core.h | 56 +++++++++++++++++++++++++++++++++++++++++-- drivers/md/dm-rq.c | 2 +- drivers/md/dm-table.c | 47 ++---------------------------------- drivers/md/dm.c | 23 ++++-------------- drivers/md/dm.h | 3 --- 5 files changed, 61 insertions(+), 70 deletions(-) diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index c4ef1fceead6..d522093cb39d 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -11,6 +11,7 @@ #include #include +#include #include #include @@ -25,9 +26,11 @@ struct dm_kobject_holder { }; /* - * DM core internal structure that used directly by dm.c and dm-rq.c - * DM targets must _not_ deference a mapped_device to directly access its members! + * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c. + * DM targets must _not_ deference a mapped_device or dm_table to directly + * access their members! */ + struct mapped_device { struct mutex suspend_lock; @@ -119,6 +122,55 @@ void disable_discard(struct mapped_device *md); void disable_write_same(struct mapped_device *md); void disable_write_zeroes(struct mapped_device *md); +static inline sector_t dm_get_size(struct mapped_device *md) +{ + return get_capacity(md->disk); +} + +static inline struct dm_stats *dm_get_stats(struct mapped_device *md) +{ + return &md->stats; +} + +#define DM_TABLE_MAX_DEPTH 16 + +struct dm_table { + struct mapped_device *md; + enum dm_queue_mode type; + + /* btree table */ + unsigned int depth; + unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */ + sector_t *index[DM_TABLE_MAX_DEPTH]; + + unsigned int num_targets; + unsigned int num_allocated; + sector_t *highs; + struct dm_target *targets; + + struct target_type *immutable_target_type; + + bool integrity_supported:1; + bool singleton:1; + unsigned integrity_added:1; + + /* + * Indicates the rw permissions for the new logical + * device. This should be a combination of FMODE_READ + * and FMODE_WRITE. 
+ */ + fmode_t mode; + + /* a list of devices used by this table */ + struct list_head devices; + + /* events get handed up using this callback */ + void (*event_fn)(void *); + void *event_context; + + struct dm_md_mempools *mempools; +}; + static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj) { return &container_of(kobj, struct dm_kobject_holder, kobj)->completion; diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 6d743ff6a314..729a72ec30cc 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -175,7 +175,7 @@ static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long mse void dm_mq_kick_requeue_list(struct mapped_device *md) { - __dm_mq_kick_requeue_list(dm_get_md_queue(md), 0); + __dm_mq_kick_requeue_list(md->queue, 0); } EXPORT_SYMBOL(dm_mq_kick_requeue_list); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 704345e95cc1..3ad22adf322d 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -25,48 +25,10 @@ #define DM_MSG_PREFIX "table" -#define MAX_DEPTH 16 #define NODE_SIZE L1_CACHE_BYTES #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t)) #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1) -struct dm_table { - struct mapped_device *md; - enum dm_queue_mode type; - - /* btree table */ - unsigned int depth; - unsigned int counts[MAX_DEPTH]; /* in nodes */ - sector_t *index[MAX_DEPTH]; - - unsigned int num_targets; - unsigned int num_allocated; - sector_t *highs; - struct dm_target *targets; - - struct target_type *immutable_target_type; - - bool integrity_supported:1; - bool singleton:1; - unsigned integrity_added:1; - - /* - * Indicates the rw permissions for the new logical - * device. This should be a combination of FMODE_READ - * and FMODE_WRITE. - */ - fmode_t mode; - - /* a list of devices used by this table */ - struct list_head devices; - - /* events get handed up using this callback */ - void (*event_fn)(void *); - void *event_context; - - struct dm_md_mempools *mempools; -}; - /* * Similar to ceiling(log_size(n)) */ @@ -2085,16 +2047,11 @@ EXPORT_SYMBOL_GPL(dm_table_device_name); void dm_table_run_md_queue_async(struct dm_table *t) { - struct mapped_device *md; - struct request_queue *queue; - if (!dm_table_request_based(t)) return; - md = dm_table_get_md(t); - queue = dm_get_md_queue(md); - if (queue) - blk_mq_run_hw_queues(queue, true); + if (t->md->queue) + blk_mq_run_hw_queues(t->md->queue, true); } EXPORT_SYMBOL(dm_table_run_md_queue_async); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index b5b18bafd865..a1adcf0ab821 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -422,21 +422,6 @@ static void do_deferred_remove(struct work_struct *w) dm_deferred_remove(); } -sector_t dm_get_size(struct mapped_device *md) -{ - return get_capacity(md->disk); -} - -struct request_queue *dm_get_md_queue(struct mapped_device *md) -{ - return md->queue; -} - -struct dm_stats *dm_get_stats(struct mapped_device *md) -{ - return &md->stats; -} - static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct mapped_device *md = bdev->bd_disk->private_data; @@ -1057,7 +1042,7 @@ static sector_t max_io_len(struct dm_target *ti, sector_t sector) * blk_max_size_offset() provides required splitting. 
* - blk_max_size_offset() also respects q->limits.max_sectors */ - max_len = blk_max_size_offset(dm_table_get_md(ti->table)->queue, + max_len = blk_max_size_offset(ti->table->md->queue, target_offset); if (len > max_len) len = max_len; @@ -2931,19 +2916,19 @@ int dm_test_deferred_remove_flag(struct mapped_device *md) int dm_suspended(struct dm_target *ti) { - return dm_suspended_md(dm_table_get_md(ti->table)); + return dm_suspended_md(ti->table->md); } EXPORT_SYMBOL_GPL(dm_suspended); int dm_post_suspending(struct dm_target *ti) { - return dm_post_suspending_md(dm_table_get_md(ti->table)); + return dm_post_suspending_md(ti->table->md); } EXPORT_SYMBOL_GPL(dm_post_suspending); int dm_noflush_suspending(struct dm_target *ti) { - return __noflush_suspending(dm_table_get_md(ti->table)); + return __noflush_suspending(ti->table->md); } EXPORT_SYMBOL_GPL(dm_noflush_suspending); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 4f5fe664d05a..fffe1e289c53 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -179,12 +179,9 @@ int dm_open_count(struct mapped_device *md); int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred); int dm_cancel_deferred_remove(struct mapped_device *md); int dm_request_based(struct mapped_device *md); -sector_t dm_get_size(struct mapped_device *md); -struct request_queue *dm_get_md_queue(struct mapped_device *md); int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, struct dm_dev **result); void dm_put_table_device(struct mapped_device *md, struct dm_dev *d); -struct dm_stats *dm_get_stats(struct mapped_device *md); int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, unsigned cookie); From d4a512edcc65704c5df51909caeab0495ba0ced3 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Sat, 19 Sep 2020 13:36:58 -0400 Subject: [PATCH 09/20] dm: use dm_table_get_device_name() where appropriate in targets dm_table_get_device_name() avoids calling dm_table_get_md() followed by dm_device_name() -- saves intermediate dm_table_get_md() call. 
Signed-off-by: Mike Snitzer --- drivers/md/dm-cache-target.c | 2 +- drivers/md/dm-mpath.c | 16 +++++++--------- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 96c93802ee4d..9644424591da 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -925,7 +925,7 @@ static enum cache_metadata_mode get_cache_mode(struct cache *cache) static const char *cache_device_name(struct cache *cache) { - return dm_device_name(dm_table_get_md(cache->ti->table)); + return dm_table_device_name(cache->ti->table); } static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode) diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index de4da825ade6..bced42f082b0 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -466,10 +466,8 @@ failed: */ #define dm_report_EIO(m) \ do { \ - struct mapped_device *md = dm_table_get_md((m)->ti->table); \ - \ DMDEBUG_LIMIT("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d", \ - dm_device_name(md), \ + dm_table_device_name((m)->ti->table), \ test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \ test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \ dm_noflush_suspending((m)->ti)); \ @@ -736,7 +734,7 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path, { unsigned long flags; bool queue_if_no_path_bit, saved_queue_if_no_path_bit; - const char *dm_dev_name = dm_device_name(dm_table_get_md(m->ti->table)); + const char *dm_dev_name = dm_table_device_name(m->ti->table); DMDEBUG("%s: %s caller=%s queue_if_no_path=%d save_old_value=%d", dm_dev_name, __func__, caller, queue_if_no_path, save_old_value); @@ -781,9 +779,9 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path, static void queue_if_no_path_timeout_work(struct timer_list *t) { struct multipath *m = from_timer(m, t, nopath_timer); - struct mapped_device *md = dm_table_get_md(m->ti->table); - DMWARN("queue_if_no_path timeout on %s, failing queued IO", dm_device_name(md)); + DMWARN("queue_if_no_path timeout on %s, failing queued IO", + dm_table_device_name(m->ti->table)); queue_if_no_path(m, false, false, __func__); } @@ -1334,7 +1332,7 @@ static int fail_path(struct pgpath *pgpath) goto out; DMWARN("%s: Failing path %s.", - dm_device_name(dm_table_get_md(m->ti->table)), + dm_table_device_name(m->ti->table), pgpath->path.dev->name); pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path); @@ -1375,7 +1373,7 @@ static int reinstate_path(struct pgpath *pgpath) goto out; DMWARN("%s: Reinstating path %s.", - dm_device_name(dm_table_get_md(m->ti->table)), + dm_table_device_name(m->ti->table), pgpath->path.dev->name); r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path); @@ -1766,7 +1764,7 @@ static void multipath_resume(struct dm_target *ti) } DMDEBUG("%s: %s finished; QIFNP = %d; SQIFNP = %d", - dm_device_name(dm_table_get_md(m->ti->table)), __func__, + dm_table_device_name(m->ti->table), __func__, test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags), test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)); From cd74693870fb748d812867ba49af733d689a3604 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Thu, 9 Jul 2020 23:20:42 -0700 Subject: [PATCH 10/20] dm crypt: don't use drivers that have CRYPTO_ALG_ALLOCATES_MEMORY Don't use crypto drivers that have the flag CRYPTO_ALG_ALLOCATES_MEMORY set. These drivers allocate memory and thus they are unsuitable for block I/O processing. 
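For context: the crypto_alloc_shash/ahash/skcipher/aead() helpers (declared under include/crypto/) take a (name, type, mask) triple, and an implementation is only selected when its algorithm flags match 'type' in every bit set in 'mask'. Passing CRYPTO_ALG_ALLOCATES_MEMORY in the mask, with the corresponding type bit left clear, therefore restricts the lookup to drivers that never set that flag. A minimal sketch of the pattern applied throughout this patch -- the "xts(aes)" mode and the local 'tfm' are only illustrative, not taken from dm-crypt:

	struct crypto_skcipher *tfm;

	/*
	 * Only accept an "xts(aes)" implementation that does not set
	 * CRYPTO_ALG_ALLOCATES_MEMORY: the flag is in the mask but
	 * cleared in the type, so drivers carrying it are skipped.
	 */
	tfm = crypto_alloc_skcipher("xts(aes)", 0, CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);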
Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer --- drivers/md/dm-crypt.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 380386c36921..392337f16ecf 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -424,7 +424,8 @@ static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti, return -EINVAL; } - lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0); + lmk->hash_tfm = crypto_alloc_shash("md5", 0, + CRYPTO_ALG_ALLOCATES_MEMORY); if (IS_ERR(lmk->hash_tfm)) { ti->error = "Error initializing LMK hash"; return PTR_ERR(lmk->hash_tfm); @@ -586,7 +587,8 @@ static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti, return -EINVAL; } - tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0); + tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, + CRYPTO_ALG_ALLOCATES_MEMORY); if (IS_ERR(tcw->crc32_tfm)) { ti->error = "Error initializing CRC32 in TCW"; return PTR_ERR(tcw->crc32_tfm); @@ -773,7 +775,8 @@ static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti, struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant; int r; - elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0); + elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0, + CRYPTO_ALG_ALLOCATES_MEMORY); if (IS_ERR(elephant->tfm)) { r = PTR_ERR(elephant->tfm); elephant->tfm = NULL; @@ -2154,7 +2157,8 @@ static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode) return -ENOMEM; for (i = 0; i < cc->tfms_count; i++) { - cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0); + cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0, + CRYPTO_ALG_ALLOCATES_MEMORY); if (IS_ERR(cc->cipher_tfm.tfms[i])) { err = PTR_ERR(cc->cipher_tfm.tfms[i]); crypt_free_tfms(cc); @@ -2180,7 +2184,8 @@ static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode) if (!cc->cipher_tfm.tfms) return -ENOMEM; - cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0, 0); + cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0, + CRYPTO_ALG_ALLOCATES_MEMORY); if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) { err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]); crypt_free_tfms(cc); @@ -2667,7 +2672,7 @@ static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api) return -ENOMEM; strncpy(mac_alg, start, end - start); - mac = crypto_alloc_ahash(mac_alg, 0, 0); + mac = crypto_alloc_ahash(mac_alg, 0, CRYPTO_ALG_ALLOCATES_MEMORY); kfree(mac_alg); if (IS_ERR(mac)) From e0910c8e4f87bb9f767e61a778b0d9271c4dc512 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Thu, 24 Sep 2020 13:14:52 -0400 Subject: [PATCH 11/20] dm raid: fix discard limits for raid1 and raid10 Block core warned that discard_granularity was 0 for dm-raid with personality of raid1. Reason is that raid_io_hints() was incorrectly special-casing raid1 rather than raid0. But since commit 29efc390b9462 ("md/md0: optimize raid0 discard handling") even raid0 properly handles large discards. Fix raid_io_hints() by removing discard limits settings for raid1. Also, fix limits for raid10 by properly stacking underlying limits as done in blk_stack_limits(). 
Depends-on: 29efc390b9462 ("md/md0: optimize raid0 discard handling") Fixes: 61697a6abd24a ("dm: eliminate 'split_discard_bios' flag from DM target interface") Cc: stable@vger.kernel.org Reported-by: Zdenek Kabelac Reported-by: Mikulas Patocka Signed-off-by: Mike Snitzer --- drivers/md/dm-raid.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 56b723d012ac..dc8568ab96f2 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3730,12 +3730,14 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits) blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs)); /* - * RAID1 and RAID10 personalities require bio splitting, - * RAID0/4/5/6 don't and process large discard bios properly. + * RAID10 personality requires bio splitting, + * RAID0/1/4/5/6 don't and process large discard bios properly. */ - if (rs_is_raid1(rs) || rs_is_raid10(rs)) { - limits->discard_granularity = chunk_size_bytes; - limits->max_discard_sectors = rs->md.chunk_sectors; + if (rs_is_raid10(rs)) { + limits->discard_granularity = max(chunk_size_bytes, + limits->discard_granularity); + limits->max_discard_sectors = min_not_zero(rs->md.chunk_sectors, + limits->max_discard_sectors); } } From f0e90b6c663a7e3b4736cb318c6c7c589f152c28 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Thu, 24 Sep 2020 16:40:12 -0400 Subject: [PATCH 12/20] dm raid: remove unnecessary discard limits for raid10 Commit bcc90d280465e ("md/raid10: improve raid10 discard request") removes raid10's inability to properly handle large discards. So eliminate associated constraint from dm-raid's raid10 support. Signed-off-by: Mike Snitzer --- drivers/md/dm-raid.c | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index dc8568ab96f2..9c1f7c4de65b 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3728,17 +3728,6 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits) blk_limits_io_min(limits, chunk_size_bytes); blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs)); - - /* - * RAID10 personality requires bio splitting, - * RAID0/1/4/5/6 don't and process large discard bios properly. - */ - if (rs_is_raid10(rs)) { - limits->discard_granularity = max(chunk_size_bytes, - limits->discard_granularity); - limits->max_discard_sectors = min_not_zero(rs->md.chunk_sectors, - limits->max_discard_sectors); - } } static void raid_postsuspend(struct dm_target *ti) From 399c9bdbd6500254895bdbb574a4acbb860cda41 Mon Sep 17 00:00:00 2001 From: Huaisheng Ye Date: Tue, 15 Sep 2020 16:56:08 +0800 Subject: [PATCH 13/20] dm thin metadata: Remove unused local variable when create thin and snap The local variable disk details is not used during the creating of thin & snap devices. Remove them from dm-thin-metadata, and add pointer validity check for pointer value in btree_lookup_raw. Skip memory copy when the caller doesn't need the value. 
Signed-off-by: Huaisheng Ye Signed-off-by: Mike Snitzer --- drivers/md/dm-thin-metadata.c | 6 ++---- drivers/md/persistent-data/dm-btree.c | 3 ++- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index b461836b6d26..6ebb2127f3e2 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -1051,12 +1051,11 @@ static int __create_thin(struct dm_pool_metadata *pmd, int r; dm_block_t dev_root; uint64_t key = dev; - struct disk_device_details details_le; struct dm_thin_device *td; __le64 value; r = dm_btree_lookup(&pmd->details_info, pmd->details_root, - &key, &details_le); + &key, NULL); if (!r) return -EEXIST; @@ -1129,12 +1128,11 @@ static int __create_snap(struct dm_pool_metadata *pmd, dm_block_t origin_root; uint64_t key = origin, dev_key = dev; struct dm_thin_device *td; - struct disk_device_details details_le; __le64 value; /* check this device is unused */ r = dm_btree_lookup(&pmd->details_info, pmd->details_root, - &dev_key, &details_le); + &dev_key, NULL); if (!r) return -EEXIST; diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index 8aae0624a297..ef6e78d45d5b 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -366,7 +366,8 @@ static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key, } while (!(flags & LEAF_NODE)); *result_key = le64_to_cpu(ro_node(s)->keys[i]); - memcpy(v, value_ptr(ro_node(s), i), value_size); + if (v) + memcpy(v, value_ptr(ro_node(s), i), value_size); return 0; } From 7d837c0dd95c4ab34ffec9f61000bb3b384da1a9 Mon Sep 17 00:00:00 2001 From: Qinglang Miao Date: Mon, 21 Sep 2020 21:10:10 +0800 Subject: [PATCH 14/20] dm snap persistent: simplify area_io() Signed-off-by: Qinglang Miao Signed-off-by: Mike Snitzer --- drivers/md/dm-snap-persistent.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 63fab7c769be..8e329c3f3a78 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -284,16 +284,9 @@ static void skip_metadata(struct pstore *ps) */ static int area_io(struct pstore *ps, int op, int op_flags) { - int r; - chunk_t chunk; + chunk_t chunk = area_location(ps, ps->current_area); - chunk = area_location(ps, ps->current_area); - - r = chunk_io(ps, ps->area, chunk, op, op_flags, 0); - if (r) - return r; - - return 0; + return chunk_io(ps, ps->area, chunk, op, op_flags, 0); } static void zero_memory_area(struct pstore *ps) From 0c2915b8c6db108b1dfb240391cc5a175f97f15b Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Mon, 28 Sep 2020 13:41:36 -0400 Subject: [PATCH 15/20] dm: fix missing imposition of queue_limits from dm_wq_work() thread If a DM device was suspended when bios were issued to it, those bios would be deferred using queue_io(). Once the DM device was resumed dm_process_bio() could be called by dm_wq_work() for original bio that still needs splitting. dm_process_bio()'s check for current->bio_list (meaning call chain is within ->submit_bio) as a prerequisite for calling blk_queue_split() for "abnormal IO" would result in dm_process_bio() never imposing corresponding queue_limits (e.g. discard_granularity, discard_max_bytes, etc). Fix this by always having dm_wq_work() resubmit deferred bios using submit_bio_noacct(). 
Side-effect is blk_queue_split() is always called for "abnormal IO" from ->submit_bio, be it from application thread or dm_wq_work() workqueue, so proper bio splitting and depth-first bio submission is performed. For sake of clarity, remove current->bio_list check before call to blk_queue_split(). Also, remove dm_wq_work()'s use of dm_{get,put}_live_table() -- no longer needed since IO will be reissued in terms of ->submit_bio. And rename bio variable from 'c' to 'bio'. Fixes: cf9c37865557 ("dm: fix comment in dm_process_bio()") Reported-by: Jeffle Xu Reviewed-by: Mikulas Patocka Signed-off-by: Mike Snitzer --- drivers/md/dm.c | 34 +++++++++------------------------- 1 file changed, 9 insertions(+), 25 deletions(-) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index a1adcf0ab821..80266b94b002 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1676,17 +1676,11 @@ static blk_qc_t dm_process_bio(struct mapped_device *md, } /* - * If in ->submit_bio we need to use blk_queue_split(), otherwise - * queue_limits for abnormal requests (e.g. discard, writesame, etc) - * won't be imposed. - * If called from dm_wq_work() for deferred bio processing, bio - * was already handled by following code with previous ->submit_bio. + * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc) + * otherwise associated queue_limits won't be imposed. */ - if (current->bio_list) { - if (is_abnormal_io(bio)) - blk_queue_split(&bio); - /* regular IO is split by __split_and_process_bio */ - } + if (is_abnormal_io(bio)) + blk_queue_split(&bio); if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) return __process_bio(md, map, bio); @@ -2383,29 +2377,19 @@ static int dm_wait_for_completion(struct mapped_device *md, long task_state) */ static void dm_wq_work(struct work_struct *work) { - struct mapped_device *md = container_of(work, struct mapped_device, - work); - struct bio *c; - int srcu_idx; - struct dm_table *map; - - map = dm_get_live_table(md, &srcu_idx); + struct mapped_device *md = container_of(work, struct mapped_device, work); + struct bio *bio; while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { spin_lock_irq(&md->deferred_lock); - c = bio_list_pop(&md->deferred); + bio = bio_list_pop(&md->deferred); spin_unlock_irq(&md->deferred_lock); - if (!c) + if (!bio) break; - if (dm_request_based(md)) - (void) submit_bio_noacct(c); - else - (void) dm_process_bio(md, map, c); + submit_bio_noacct(bio); } - - dm_put_live_table(md, srcu_idx); } static void dm_queue_flush(struct mapped_device *md) From b2abdb1b4b9eaffc4f41aa466ce77c2d91bb23df Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Wed, 30 Sep 2020 13:45:20 -0400 Subject: [PATCH 16/20] dm: fold dm_process_bio() into dm_submit_bio() dm_process_bio() is only called by dm_submit_bio(), there is no benefit to keeping dm_process_bio() factored out, so fold it. While at it, cleanup dm_submit_bio()'s DMF_BLOCK_IO_FOR_SUSPEND related branching and expand scope of dm_get_live_table() rcu reference on map via common 'out' label to dm_put_live_table(). 
Signed-off-by: Mike Snitzer --- drivers/md/dm.c | 62 +++++++++++++++++++++---------------------------- 1 file changed, 27 insertions(+), 35 deletions(-) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 80266b94b002..93ca051f88f0 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1665,28 +1665,6 @@ out: return ret; } -static blk_qc_t dm_process_bio(struct mapped_device *md, - struct dm_table *map, struct bio *bio) -{ - blk_qc_t ret = BLK_QC_T_NONE; - - if (unlikely(!map)) { - bio_io_error(bio); - return ret; - } - - /* - * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc) - * otherwise associated queue_limits won't be imposed. - */ - if (is_abnormal_io(bio)) - blk_queue_split(&bio); - - if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) - return __process_bio(md, map, bio); - return __split_and_process_bio(md, map, bio); -} - static blk_qc_t dm_submit_bio(struct bio *bio) { struct mapped_device *md = bio->bi_disk->private_data; @@ -1707,22 +1685,36 @@ static blk_qc_t dm_submit_bio(struct bio *bio) } map = dm_get_live_table(md, &srcu_idx); - - /* if we're suspended, we have to queue this io for later */ - if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { - dm_put_live_table(md, srcu_idx); - - if (bio->bi_opf & REQ_NOWAIT) - bio_wouldblock_error(bio); - else if (!(bio->bi_opf & REQ_RAHEAD)) - queue_io(md, bio); - else - bio_io_error(bio); - return ret; + if (unlikely(!map)) { + DMERR_LIMIT("%s: mapping table unavailable, erroring io", + dm_device_name(md)); + bio_io_error(bio); + goto out; } - ret = dm_process_bio(md, map, bio); + /* If suspended, queue this IO for later */ + if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { + if (bio->bi_opf & REQ_NOWAIT) + bio_wouldblock_error(bio); + else if (bio->bi_opf & REQ_RAHEAD) + bio_io_error(bio); + else + queue_io(md, bio); + goto out; + } + /* + * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc) + * otherwise associated queue_limits won't be imposed. + */ + if (is_abnormal_io(bio)) + blk_queue_split(&bio); + + if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) + ret = __process_bio(md, map, bio); + else + ret = __split_and_process_bio(md, map, bio); +out: dm_put_live_table(md, srcu_idx); return ret; } From 0cede372ce6a8adf4d4d28fe7edd2aa913804595 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Wed, 30 Sep 2020 15:12:04 -0400 Subject: [PATCH 17/20] dm: fix comment in __dm_suspend() Fix stale references to functions that have been renamed and fix typo. Signed-off-by: Mike Snitzer --- drivers/md/dm.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 93ca051f88f0..32ac19645255 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2518,13 +2518,12 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, /* * Here we must make sure that no processes are submitting requests * to target drivers i.e. no one may be executing - * __split_and_process_bio. This is called from dm_request and - * dm_wq_work. + * __split_and_process_bio from dm_submit_bio. * - * To get all processes out of __split_and_process_bio in dm_request, + * To get all processes out of __split_and_process_bio in dm_submit_bio, * we take the write lock. 
To prevent any process from reentering - * __split_and_process_bio from dm_request and quiesce the thread - * (dm_wq_work), we set BMF_BLOCK_IO_FOR_SUSPEND and call + * __split_and_process_bio from dm_submit_bio and quiesce the thread + * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call * flush_workqueue(md->wq). */ set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); From 61931c0ee9cf5da575996b977a2358b598ef84bb Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Thu, 1 Oct 2020 15:00:56 -0400 Subject: [PATCH 18/20] dm: export dm_copy_name_and_uuid Allow DM targets to access the configured name and uuid. Also, bump DM ioctl version. Signed-off-by: Mike Snitzer --- drivers/md/dm-ioctl.c | 2 +- include/uapi/linux/dm-ioctl.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 28122e850ea1..cd0478d44058 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -2044,7 +2044,7 @@ out: return r; } - +EXPORT_SYMBOL_GPL(dm_copy_name_and_uuid); /** * dm_early_create - create a mapped device in early boot. diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h index 6622912c2342..4933b6b67b85 100644 --- a/include/uapi/linux/dm-ioctl.h +++ b/include/uapi/linux/dm-ioctl.h @@ -272,9 +272,9 @@ enum { #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 42 +#define DM_VERSION_MINOR 43 #define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2020-02-27)" +#define DM_VERSION_EXTRA "-ioctl (2020-10-01)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ From 9c37de297f6590937f95a28bec1b7ac68a38618f Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Wed, 7 Oct 2020 15:15:08 -0400 Subject: [PATCH 19/20] dm: remove special-casing of bio-based immutable singleton target on NVMe Since commit 5a6c35f9af416 ("block: remove direct_make_request") there is no benefit to DM special-casing NVMe. Remove all code used to establish DM_TYPE_NVME_BIO_BASED. 
Signed-off-by: Mike Snitzer --- drivers/md/dm-table.c | 32 ++------------------ drivers/md/dm.c | 55 ++++------------------------------- include/linux/device-mapper.h | 1 - 3 files changed, 7 insertions(+), 81 deletions(-) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 3ad22adf322d..ce543b761be7 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -804,8 +804,7 @@ EXPORT_SYMBOL(dm_consume_args); static bool __table_type_bio_based(enum dm_queue_mode table_type) { return (table_type == DM_TYPE_BIO_BASED || - table_type == DM_TYPE_DAX_BIO_BASED || - table_type == DM_TYPE_NVME_BIO_BASED); + table_type == DM_TYPE_DAX_BIO_BASED); } static bool __table_type_request_based(enum dm_queue_mode table_type) @@ -861,8 +860,6 @@ bool dm_table_supports_dax(struct dm_table *t, return true; } -static bool dm_table_does_not_support_partial_completion(struct dm_table *t); - static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { @@ -892,7 +889,6 @@ static int dm_table_determine_type(struct dm_table *t) goto verify_bio_based; } BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED); - BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED); goto verify_rq_based; } @@ -931,15 +927,6 @@ verify_bio_based: if (dm_table_supports_dax(t, device_supports_dax, &page_size) || (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) { t->type = DM_TYPE_DAX_BIO_BASED; - } else { - /* Check if upgrading to NVMe bio-based is valid or required */ - tgt = dm_table_get_immutable_target(t); - if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) { - t->type = DM_TYPE_NVME_BIO_BASED; - goto verify_rq_based; /* must be stacked directly on NVMe (blk-mq) */ - } else if (list_empty(devices) && live_md_type == DM_TYPE_NVME_BIO_BASED) { - t->type = DM_TYPE_NVME_BIO_BASED; - } } return 0; } @@ -956,8 +943,7 @@ verify_rq_based: * (e.g. request completion process for partial completion.) */ if (t->num_targets > 1) { - DMERR("%s DM doesn't support multiple targets", - t->type == DM_TYPE_NVME_BIO_BASED ? 
"nvme bio-based" : "request-based"); + DMERR("request-based DM doesn't support multiple targets"); return -EINVAL; } @@ -1651,20 +1637,6 @@ static bool dm_table_all_devices_attribute(struct dm_table *t, return true; } -static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) -{ - char b[BDEVNAME_SIZE]; - - /* For now, NVMe devices are the only devices of this class */ - return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0); -} - -static bool dm_table_does_not_support_partial_completion(struct dm_table *t) -{ - return dm_table_all_devices_attribute(t, device_no_partial_completion); -} - static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 32ac19645255..af1bab3a810e 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -975,7 +975,7 @@ static void clone_endio(struct bio *bio) dm_endio_fn endio = tio->ti->type->end_io; struct bio *orig_bio = io->orig_bio; - if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) { + if (unlikely(error == BLK_STS_TARGET)) { if (bio_op(bio) == REQ_OP_DISCARD && !bio->bi_disk->queue->limits.max_discard_sectors) disable_discard(md); @@ -1626,45 +1626,6 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, return ret; } -/* - * Optimized variant of __split_and_process_bio that leverages the - * fact that targets that use it do _not_ have a need to split bios. - */ -static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map, - struct bio *bio) -{ - struct clone_info ci; - blk_qc_t ret = BLK_QC_T_NONE; - int error = 0; - - init_clone_info(&ci, md, map, bio); - - if (bio->bi_opf & REQ_PREFLUSH) { - error = __send_empty_flush(&ci); - /* dec_pending submits any data associated with flush */ - } else { - struct dm_target_io *tio; - struct dm_target *ti = md->immutable_target; - - if (WARN_ON_ONCE(!ti)) { - error = -EIO; - goto out; - } - - ci.bio = bio; - ci.sector_count = bio_sectors(bio); - if (__process_abnormal_io(&ci, ti, &error)) - goto out; - - tio = alloc_tio(&ci, ti, 0, GFP_NOIO); - ret = __clone_and_map_simple_bio(&ci, tio, NULL); - } -out: - /* drop the extra reference count */ - dec_pending(ci.io, errno_to_blk_status(error)); - return ret; -} - static blk_qc_t dm_submit_bio(struct bio *bio) { struct mapped_device *md = bio->bi_disk->private_data; @@ -1710,10 +1671,7 @@ static blk_qc_t dm_submit_bio(struct bio *bio) if (is_abnormal_io(bio)) blk_queue_split(&bio); - if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) - ret = __process_bio(md, map, bio); - else - ret = __split_and_process_bio(md, map, bio); + ret = __split_and_process_bio(md, map, bio); out: dm_put_live_table(md, srcu_idx); return ret; @@ -2038,11 +1996,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, if (request_based) dm_stop_queue(q); - if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) { + if (request_based) { /* - * Leverage the fact that request-based DM targets and - * NVMe bio based targets are immutable singletons - * - used to optimize both __process_bio and dm_mq_queue_rq + * Leverage the fact that request-based DM targets are + * immutable singletons - used to optimize dm_mq_queue_rq. 
*/ md->immutable_target = dm_table_get_immutable_target(t); } @@ -2164,7 +2121,6 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) break; case DM_TYPE_BIO_BASED: case DM_TYPE_DAX_BIO_BASED: - case DM_TYPE_NVME_BIO_BASED: break; case DM_TYPE_NONE: WARN_ON_ONCE(true); @@ -2922,7 +2878,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu switch (type) { case DM_TYPE_BIO_BASED: case DM_TYPE_DAX_BIO_BASED: - case DM_TYPE_NVME_BIO_BASED: pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size); front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio); diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index d6f8d4ba8d48..61a66fb8ebb3 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -29,7 +29,6 @@ enum dm_queue_mode { DM_TYPE_BIO_BASED = 1, DM_TYPE_REQUEST_BASED = 2, DM_TYPE_DAX_BIO_BASED = 3, - DM_TYPE_NVME_BIO_BASED = 4, }; typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; From 681cc5e8667e8579a2da8fa4090c48a2d73fc3bb Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Wed, 7 Oct 2020 16:41:01 -0400 Subject: [PATCH 20/20] dm: fix request-based DM to not bounce through indirect dm_submit_bio It is unnecessary to force request-based DM to call into bio-based dm_submit_bio (via indirect disk->fops->submit_bio) only to have it then call blk_mq_submit_bio(). Fix this by establishing a request-based DM block_device_operations (dm_rq_blk_dops, which doesn't have .submit_bio) and update dm_setup_md_queue() to set md->disk->fops to it for DM_TYPE_REQUEST_BASED. Remove DM_TYPE_REQUEST_BASED conditional in dm_submit_bio and unexport blk_mq_submit_bio. Fixes: c62b37d96b6eb ("block: move ->make_request_fn to struct block_device_operations") Signed-off-by: Mike Snitzer --- block/blk-mq.c | 1 - drivers/md/dm.c | 25 ++++++++++++------------- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 176698b2285b..0191fc0447f7 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2265,7 +2265,6 @@ queue_exit: blk_queue_exit(q); return BLK_QC_T_NONE; } -EXPORT_SYMBOL_GPL(blk_mq_submit_bio); /* only for request based dm */ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index af1bab3a810e..e396ec2b45c3 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1633,18 +1633,6 @@ static blk_qc_t dm_submit_bio(struct bio *bio) int srcu_idx; struct dm_table *map; - if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) { - /* - * We are called with a live reference on q_usage_counter, but - * that one will be released as soon as we return. Grab an - * extra one as blk_mq_submit_bio expects to be able to consume - * a reference (which lives until the request is freed in case a - * request is allocated). 
- */ - percpu_ref_get(&bio->bi_disk->queue->q_usage_counter); - return blk_mq_submit_bio(bio); - } - map = dm_get_live_table(md, &srcu_idx); if (unlikely(!map)) { DMERR_LIMIT("%s: mapping table unavailable, erroring io", @@ -1727,6 +1715,7 @@ static int next_free_minor(int *minor) } static const struct block_device_operations dm_blk_dops; +static const struct block_device_operations dm_rq_blk_dops; static const struct dax_operations dm_dax_ops; static void dm_wq_work(struct work_struct *work); @@ -2113,9 +2102,10 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) switch (type) { case DM_TYPE_REQUEST_BASED: + md->disk->fops = &dm_rq_blk_dops; r = dm_mq_init_request_queue(md, t); if (r) { - DMERR("Cannot initialize queue for request-based dm-mq mapped device"); + DMERR("Cannot initialize queue for request-based dm mapped device"); return r; } break; @@ -3095,6 +3085,15 @@ static const struct block_device_operations dm_blk_dops = { .owner = THIS_MODULE }; +static const struct block_device_operations dm_rq_blk_dops = { + .open = dm_blk_open, + .release = dm_blk_close, + .ioctl = dm_blk_ioctl, + .getgeo = dm_blk_getgeo, + .pr_ops = &dm_pr_ops, + .owner = THIS_MODULE +}; + static const struct dax_operations dm_dax_ops = { .direct_access = dm_dax_direct_access, .dax_supported = dm_dax_supported,