Merge tag 'for-5.10/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper updates from Mike Snitzer:

 - Improve DM core's bio splitting to use blk_max_size_offset(). Also
   fix bio splitting for bios that were deferred to the worker thread
   due to a DM device being suspended.

 - Remove DM core's special handling of NVMe devices now that block core
   has internalized the efficiencies drivers previously needed to be
   concerned about (via the now-removed direct_make_request).

 - Fix request-based DM to not bounce through indirect dm_submit_bio;
   instead have block core call blk_mq_submit_bio() directly.

 - Various DM core cleanups to simplify and improve code.

 - Update DM crypt to not use drivers that set
   CRYPTO_ALG_ALLOCATES_MEMORY (see the sketches following this list).

 - Fix DM raid's raid1 and raid10 discard limits for the purposes of
   linux-stable. But then remove DM raid's discard limits settings now
   that MD raid can efficiently handle large discards.

 - A couple small cleanups across various targets.
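
Two illustrative sketches of the points above (not code from this series;
the helper names dm_crypt_alloc_tfm() and dm_split_len() are invented for
illustration):

DM crypt: passing CRYPTO_ALG_ALLOCATES_MEMORY in the mask argument of the
crypto allocation helpers restricts the lookup to implementations that keep
the flag clear, i.e. drivers that do not allocate memory while handling a
request. A minimal sketch, assuming only the upstream crypto API:

#include <linux/crypto.h>
#include <crypto/skcipher.h>

/*
 * Hypothetical helper: allocate a cipher while excluding any implementation
 * that may allocate memory in its request path (such allocations can
 * deadlock when dm-crypt sits in the writeback path under memory pressure).
 */
static struct crypto_skcipher *dm_crypt_alloc_tfm(const char *ciphermode)
{
	return crypto_alloc_skcipher(ciphermode, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
}

Bio splitting: once a target's ->max_io_len has been stacked into the
queue's chunk_sectors limit (the lcm_not_zero() hunk in the dm-table diff
below), the length DM may map at a given target offset is simply whatever
blk_max_size_offset() allows there, clamped to the end of the target. A
sketch of that arithmetic, assuming the 5.10 two-argument
blk_max_size_offset():

#include <linux/blkdev.h>
#include <linux/device-mapper.h>

/* Hypothetical helper mirroring the reasoning behind the new max_io_len(). */
static sector_t dm_split_len(struct dm_target *ti, struct request_queue *q,
			     sector_t target_offset)
{
	/* Never cross the end of this target... */
	sector_t to_boundary = ti->len - target_offset;
	/* ...and never exceed what the queue limits permit at this offset. */
	unsigned int allowed = blk_max_size_offset(q, target_offset);

	return min_t(sector_t, to_boundary, allowed);
}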

* tag 'for-5.10/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm: fix request-based DM to not bounce through indirect dm_submit_bio
  dm: remove special-casing of bio-based immutable singleton target on NVMe
  dm: export dm_copy_name_and_uuid
  dm: fix comment in __dm_suspend()
  dm: fold dm_process_bio() into dm_submit_bio()
  dm: fix missing imposition of queue_limits from dm_wq_work() thread
  dm snap persistent: simplify area_io()
  dm thin metadata: Remove unused local variable when create thin and snap
  dm raid: remove unnecessary discard limits for raid10
  dm raid: fix discard limits for raid1 and raid10
  dm crypt: don't use drivers that have CRYPTO_ALG_ALLOCATES_MEMORY
  dm: use dm_table_get_device_name() where appropriate in targets
  dm table: make 'struct dm_table' definition accessible to all of DM core
  dm: eliminate need for start_io_acct() forward declaration
  dm: simplify __process_abnormal_io()
  dm: push use of on-stack flush_bio down to __send_empty_flush()
  dm: optimize max_io_len() by inlining max_io_len_target_boundary()
  dm: push md->immutable_target optimization down to __process_bio()
  dm: change max_io_len() to use blk_max_size_offset()
  dm table: stack 'chunk_sectors' limit to account for target-specific splitting
Linus Torvalds 2020-10-14 15:05:38 -07:00
commit 4815519ed0
16 changed files with 224 additions and 397 deletions


@ -2270,7 +2270,6 @@ queue_exit:
blk_queue_exit(q); blk_queue_exit(q);
return BLK_QC_T_NONE; return BLK_QC_T_NONE;
} }
EXPORT_SYMBOL_GPL(blk_mq_submit_bio); /* only for request based dm */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
unsigned int hctx_idx) unsigned int hctx_idx)


@ -925,7 +925,7 @@ static enum cache_metadata_mode get_cache_mode(struct cache *cache)
static const char *cache_device_name(struct cache *cache) static const char *cache_device_name(struct cache *cache)
{ {
return dm_device_name(dm_table_get_md(cache->ti->table)); return dm_table_device_name(cache->ti->table);
} }
static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode) static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode)


@ -11,6 +11,7 @@
#include <linux/kthread.h> #include <linux/kthread.h>
#include <linux/ktime.h> #include <linux/ktime.h>
#include <linux/genhd.h>
#include <linux/blk-mq.h> #include <linux/blk-mq.h>
#include <trace/events/block.h> #include <trace/events/block.h>
@ -25,9 +26,11 @@ struct dm_kobject_holder {
}; };
/* /*
* DM core internal structure that used directly by dm.c and dm-rq.c * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
* DM targets must _not_ deference a mapped_device to directly access its members! * DM targets must _not_ deference a mapped_device or dm_table to directly
* access their members!
*/ */
struct mapped_device { struct mapped_device {
struct mutex suspend_lock; struct mutex suspend_lock;
@ -119,6 +122,55 @@ void disable_discard(struct mapped_device *md);
void disable_write_same(struct mapped_device *md); void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md); void disable_write_zeroes(struct mapped_device *md);
static inline sector_t dm_get_size(struct mapped_device *md)
{
return get_capacity(md->disk);
}
static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
{
return &md->stats;
}
#define DM_TABLE_MAX_DEPTH 16
struct dm_table {
struct mapped_device *md;
enum dm_queue_mode type;
/* btree table */
unsigned int depth;
unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
sector_t *index[DM_TABLE_MAX_DEPTH];
unsigned int num_targets;
unsigned int num_allocated;
sector_t *highs;
struct dm_target *targets;
struct target_type *immutable_target_type;
bool integrity_supported:1;
bool singleton:1;
unsigned integrity_added:1;
/*
* Indicates the rw permissions for the new logical
* device. This should be a combination of FMODE_READ
* and FMODE_WRITE.
*/
fmode_t mode;
/* a list of devices used by this table */
struct list_head devices;
/* events get handed up using this callback */
void (*event_fn)(void *);
void *event_context;
struct dm_md_mempools *mempools;
};
static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj) static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{ {
return &container_of(kobj, struct dm_kobject_holder, kobj)->completion; return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;


@ -424,7 +424,8 @@ static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
return -EINVAL; return -EINVAL;
} }
lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0); lmk->hash_tfm = crypto_alloc_shash("md5", 0,
CRYPTO_ALG_ALLOCATES_MEMORY);
if (IS_ERR(lmk->hash_tfm)) { if (IS_ERR(lmk->hash_tfm)) {
ti->error = "Error initializing LMK hash"; ti->error = "Error initializing LMK hash";
return PTR_ERR(lmk->hash_tfm); return PTR_ERR(lmk->hash_tfm);
@ -586,7 +587,8 @@ static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
return -EINVAL; return -EINVAL;
} }
tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0); tcw->crc32_tfm = crypto_alloc_shash("crc32", 0,
CRYPTO_ALG_ALLOCATES_MEMORY);
if (IS_ERR(tcw->crc32_tfm)) { if (IS_ERR(tcw->crc32_tfm)) {
ti->error = "Error initializing CRC32 in TCW"; ti->error = "Error initializing CRC32 in TCW";
return PTR_ERR(tcw->crc32_tfm); return PTR_ERR(tcw->crc32_tfm);
@ -773,7 +775,8 @@ static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti,
struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant; struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
int r; int r;
elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0); elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0,
CRYPTO_ALG_ALLOCATES_MEMORY);
if (IS_ERR(elephant->tfm)) { if (IS_ERR(elephant->tfm)) {
r = PTR_ERR(elephant->tfm); r = PTR_ERR(elephant->tfm);
elephant->tfm = NULL; elephant->tfm = NULL;
@ -2154,7 +2157,8 @@ static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
return -ENOMEM; return -ENOMEM;
for (i = 0; i < cc->tfms_count; i++) { for (i = 0; i < cc->tfms_count; i++) {
cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0); cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0,
CRYPTO_ALG_ALLOCATES_MEMORY);
if (IS_ERR(cc->cipher_tfm.tfms[i])) { if (IS_ERR(cc->cipher_tfm.tfms[i])) {
err = PTR_ERR(cc->cipher_tfm.tfms[i]); err = PTR_ERR(cc->cipher_tfm.tfms[i]);
crypt_free_tfms(cc); crypt_free_tfms(cc);
@ -2180,7 +2184,8 @@ static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
if (!cc->cipher_tfm.tfms) if (!cc->cipher_tfm.tfms)
return -ENOMEM; return -ENOMEM;
cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0, 0); cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0,
CRYPTO_ALG_ALLOCATES_MEMORY);
if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) { if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]); err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
crypt_free_tfms(cc); crypt_free_tfms(cc);
@ -2667,7 +2672,7 @@ static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
return -ENOMEM; return -ENOMEM;
strncpy(mac_alg, start, end - start); strncpy(mac_alg, start, end - start);
mac = crypto_alloc_ahash(mac_alg, 0, 0); mac = crypto_alloc_ahash(mac_alg, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
kfree(mac_alg); kfree(mac_alg);
if (IS_ERR(mac)) if (IS_ERR(mac))


@ -2044,7 +2044,7 @@ out:
return r; return r;
} }
EXPORT_SYMBOL_GPL(dm_copy_name_and_uuid);
/** /**
* dm_early_create - create a mapped device in early boot. * dm_early_create - create a mapped device in early boot.


@ -466,10 +466,8 @@ failed:
*/ */
#define dm_report_EIO(m) \ #define dm_report_EIO(m) \
do { \ do { \
struct mapped_device *md = dm_table_get_md((m)->ti->table); \
\
DMDEBUG_LIMIT("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d", \ DMDEBUG_LIMIT("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d", \
dm_device_name(md), \ dm_table_device_name((m)->ti->table), \
test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \ test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \
test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \ test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
dm_noflush_suspending((m)->ti)); \ dm_noflush_suspending((m)->ti)); \
@ -736,7 +734,7 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
{ {
unsigned long flags; unsigned long flags;
bool queue_if_no_path_bit, saved_queue_if_no_path_bit; bool queue_if_no_path_bit, saved_queue_if_no_path_bit;
const char *dm_dev_name = dm_device_name(dm_table_get_md(m->ti->table)); const char *dm_dev_name = dm_table_device_name(m->ti->table);
DMDEBUG("%s: %s caller=%s queue_if_no_path=%d save_old_value=%d", DMDEBUG("%s: %s caller=%s queue_if_no_path=%d save_old_value=%d",
dm_dev_name, __func__, caller, queue_if_no_path, save_old_value); dm_dev_name, __func__, caller, queue_if_no_path, save_old_value);
@ -781,9 +779,9 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
static void queue_if_no_path_timeout_work(struct timer_list *t) static void queue_if_no_path_timeout_work(struct timer_list *t)
{ {
struct multipath *m = from_timer(m, t, nopath_timer); struct multipath *m = from_timer(m, t, nopath_timer);
struct mapped_device *md = dm_table_get_md(m->ti->table);
DMWARN("queue_if_no_path timeout on %s, failing queued IO", dm_device_name(md)); DMWARN("queue_if_no_path timeout on %s, failing queued IO",
dm_table_device_name(m->ti->table));
queue_if_no_path(m, false, false, __func__); queue_if_no_path(m, false, false, __func__);
} }
@ -1334,7 +1332,7 @@ static int fail_path(struct pgpath *pgpath)
goto out; goto out;
DMWARN("%s: Failing path %s.", DMWARN("%s: Failing path %s.",
dm_device_name(dm_table_get_md(m->ti->table)), dm_table_device_name(m->ti->table),
pgpath->path.dev->name); pgpath->path.dev->name);
pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path); pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
@ -1375,7 +1373,7 @@ static int reinstate_path(struct pgpath *pgpath)
goto out; goto out;
DMWARN("%s: Reinstating path %s.", DMWARN("%s: Reinstating path %s.",
dm_device_name(dm_table_get_md(m->ti->table)), dm_table_device_name(m->ti->table),
pgpath->path.dev->name); pgpath->path.dev->name);
r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path); r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
@ -1766,7 +1764,7 @@ static void multipath_resume(struct dm_target *ti)
} }
DMDEBUG("%s: %s finished; QIFNP = %d; SQIFNP = %d", DMDEBUG("%s: %s finished; QIFNP = %d; SQIFNP = %d",
dm_device_name(dm_table_get_md(m->ti->table)), __func__, dm_table_device_name(m->ti->table), __func__,
test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags), test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)); test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));


@ -3728,15 +3728,6 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_min(limits, chunk_size_bytes); blk_limits_io_min(limits, chunk_size_bytes);
blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs)); blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
/*
* RAID1 and RAID10 personalities require bio splitting,
* RAID0/4/5/6 don't and process large discard bios properly.
*/
if (rs_is_raid1(rs) || rs_is_raid10(rs)) {
limits->discard_granularity = chunk_size_bytes;
limits->max_discard_sectors = rs->md.chunk_sectors;
}
} }
static void raid_postsuspend(struct dm_target *ti) static void raid_postsuspend(struct dm_target *ti)


@ -175,7 +175,7 @@ static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long mse
void dm_mq_kick_requeue_list(struct mapped_device *md) void dm_mq_kick_requeue_list(struct mapped_device *md)
{ {
__dm_mq_kick_requeue_list(dm_get_md_queue(md), 0); __dm_mq_kick_requeue_list(md->queue, 0);
} }
EXPORT_SYMBOL(dm_mq_kick_requeue_list); EXPORT_SYMBOL(dm_mq_kick_requeue_list);


@ -284,16 +284,9 @@ static void skip_metadata(struct pstore *ps)
*/ */
static int area_io(struct pstore *ps, int op, int op_flags) static int area_io(struct pstore *ps, int op, int op_flags)
{ {
int r; chunk_t chunk = area_location(ps, ps->current_area);
chunk_t chunk;
chunk = area_location(ps, ps->current_area); return chunk_io(ps, ps->area, chunk, op, op_flags, 0);
r = chunk_io(ps, ps->area, chunk, op, op_flags, 0);
if (r)
return r;
return 0;
} }
static void zero_memory_area(struct pstore *ps) static void zero_memory_area(struct pstore *ps)


@ -18,54 +18,17 @@
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/atomic.h> #include <linux/atomic.h>
#include <linux/lcm.h>
#include <linux/blk-mq.h> #include <linux/blk-mq.h>
#include <linux/mount.h> #include <linux/mount.h>
#include <linux/dax.h> #include <linux/dax.h>
#define DM_MSG_PREFIX "table" #define DM_MSG_PREFIX "table"
#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES #define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t)) #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1) #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
struct dm_table {
struct mapped_device *md;
enum dm_queue_mode type;
/* btree table */
unsigned int depth;
unsigned int counts[MAX_DEPTH]; /* in nodes */
sector_t *index[MAX_DEPTH];
unsigned int num_targets;
unsigned int num_allocated;
sector_t *highs;
struct dm_target *targets;
struct target_type *immutable_target_type;
bool integrity_supported:1;
bool singleton:1;
unsigned integrity_added:1;
/*
* Indicates the rw permissions for the new logical
* device. This should be a combination of FMODE_READ
* and FMODE_WRITE.
*/
fmode_t mode;
/* a list of devices used by this table */
struct list_head devices;
/* events get handed up using this callback */
void (*event_fn)(void *);
void *event_context;
struct dm_md_mempools *mempools;
};
/* /*
* Similar to ceiling(log_size(n)) * Similar to ceiling(log_size(n))
*/ */
@ -841,8 +804,7 @@ EXPORT_SYMBOL(dm_consume_args);
static bool __table_type_bio_based(enum dm_queue_mode table_type) static bool __table_type_bio_based(enum dm_queue_mode table_type)
{ {
return (table_type == DM_TYPE_BIO_BASED || return (table_type == DM_TYPE_BIO_BASED ||
table_type == DM_TYPE_DAX_BIO_BASED || table_type == DM_TYPE_DAX_BIO_BASED);
table_type == DM_TYPE_NVME_BIO_BASED);
} }
static bool __table_type_request_based(enum dm_queue_mode table_type) static bool __table_type_request_based(enum dm_queue_mode table_type)
@ -898,8 +860,6 @@ bool dm_table_supports_dax(struct dm_table *t,
return true; return true;
} }
static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev, static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data) sector_t start, sector_t len, void *data)
{ {
@ -929,7 +889,6 @@ static int dm_table_determine_type(struct dm_table *t)
goto verify_bio_based; goto verify_bio_based;
} }
BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED); BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
goto verify_rq_based; goto verify_rq_based;
} }
@ -968,15 +927,6 @@ verify_bio_based:
if (dm_table_supports_dax(t, device_supports_dax, &page_size) || if (dm_table_supports_dax(t, device_supports_dax, &page_size) ||
(list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) { (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
t->type = DM_TYPE_DAX_BIO_BASED; t->type = DM_TYPE_DAX_BIO_BASED;
} else {
/* Check if upgrading to NVMe bio-based is valid or required */
tgt = dm_table_get_immutable_target(t);
if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) {
t->type = DM_TYPE_NVME_BIO_BASED;
goto verify_rq_based; /* must be stacked directly on NVMe (blk-mq) */
} else if (list_empty(devices) && live_md_type == DM_TYPE_NVME_BIO_BASED) {
t->type = DM_TYPE_NVME_BIO_BASED;
}
} }
return 0; return 0;
} }
@ -993,8 +943,7 @@ verify_rq_based:
* (e.g. request completion process for partial completion.) * (e.g. request completion process for partial completion.)
*/ */
if (t->num_targets > 1) { if (t->num_targets > 1) {
DMERR("%s DM doesn't support multiple targets", DMERR("request-based DM doesn't support multiple targets");
t->type == DM_TYPE_NVME_BIO_BASED ? "nvme bio-based" : "request-based");
return -EINVAL; return -EINVAL;
} }
@ -1506,6 +1455,10 @@ int dm_calculate_queue_limits(struct dm_table *table,
zone_sectors = ti_limits.chunk_sectors; zone_sectors = ti_limits.chunk_sectors;
} }
/* Stack chunk_sectors if target-specific splitting is required */
if (ti->max_io_len)
ti_limits.chunk_sectors = lcm_not_zero(ti->max_io_len,
ti_limits.chunk_sectors);
/* Set I/O hints portion of queue limits */ /* Set I/O hints portion of queue limits */
if (ti->type->io_hints) if (ti->type->io_hints)
ti->type->io_hints(ti, &ti_limits); ti->type->io_hints(ti, &ti_limits);
@ -1684,20 +1637,6 @@ static bool dm_table_all_devices_attribute(struct dm_table *t,
return true; return true;
} }
static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
char b[BDEVNAME_SIZE];
/* For now, NVMe devices are the only devices of this class */
return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
}
static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
{
return dm_table_all_devices_attribute(t, device_no_partial_completion);
}
static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev, static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data) sector_t start, sector_t len, void *data)
{ {
@ -2080,16 +2019,11 @@ EXPORT_SYMBOL_GPL(dm_table_device_name);
void dm_table_run_md_queue_async(struct dm_table *t) void dm_table_run_md_queue_async(struct dm_table *t)
{ {
struct mapped_device *md;
struct request_queue *queue;
if (!dm_table_request_based(t)) if (!dm_table_request_based(t))
return; return;
md = dm_table_get_md(t); if (t->md->queue)
queue = dm_get_md_queue(md); blk_mq_run_hw_queues(t->md->queue, true);
if (queue)
blk_mq_run_hw_queues(queue, true);
} }
EXPORT_SYMBOL(dm_table_run_md_queue_async); EXPORT_SYMBOL(dm_table_run_md_queue_async);


@ -1051,12 +1051,11 @@ static int __create_thin(struct dm_pool_metadata *pmd,
int r; int r;
dm_block_t dev_root; dm_block_t dev_root;
uint64_t key = dev; uint64_t key = dev;
struct disk_device_details details_le;
struct dm_thin_device *td; struct dm_thin_device *td;
__le64 value; __le64 value;
r = dm_btree_lookup(&pmd->details_info, pmd->details_root, r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
&key, &details_le); &key, NULL);
if (!r) if (!r)
return -EEXIST; return -EEXIST;
@ -1129,12 +1128,11 @@ static int __create_snap(struct dm_pool_metadata *pmd,
dm_block_t origin_root; dm_block_t origin_root;
uint64_t key = origin, dev_key = dev; uint64_t key = origin, dev_key = dev;
struct dm_thin_device *td; struct dm_thin_device *td;
struct disk_device_details details_le;
__le64 value; __le64 value;
/* check this device is unused */ /* check this device is unused */
r = dm_btree_lookup(&pmd->details_info, pmd->details_root, r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
&dev_key, &details_le); &dev_key, NULL);
if (!r) if (!r)
return -EEXIST; return -EEXIST;


@ -422,21 +422,6 @@ static void do_deferred_remove(struct work_struct *w)
dm_deferred_remove(); dm_deferred_remove();
} }
sector_t dm_get_size(struct mapped_device *md)
{
return get_capacity(md->disk);
}
struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
return md->queue;
}
struct dm_stats *dm_get_stats(struct mapped_device *md)
{
return &md->stats;
}
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{ {
struct mapped_device *md = bdev->bd_disk->private_data; struct mapped_device *md = bdev->bd_disk->private_data;
@ -591,7 +576,44 @@ out:
return r; return r;
} }
static void start_io_acct(struct dm_io *io); u64 dm_start_time_ns_from_clone(struct bio *bio)
{
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
struct dm_io *io = tio->io;
return jiffies_to_nsecs(io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
struct bio *bio = io->orig_bio;
io->start_time = bio_start_io_acct(bio);
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
bio->bi_iter.bi_sector, bio_sectors(bio),
false, 0, &io->stats_aux);
}
static void end_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
struct bio *bio = io->orig_bio;
unsigned long duration = jiffies - io->start_time;
bio_end_io_acct(bio, io->start_time);
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
bio->bi_iter.bi_sector, bio_sectors(bio),
true, duration, &io->stats_aux);
/* nudge anyone waiting on suspend queue */
if (unlikely(wq_has_sleeper(&md->wait)))
wake_up(&md->wait);
}
static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{ {
@ -657,45 +679,6 @@ static void free_tio(struct dm_target_io *tio)
bio_put(&tio->clone); bio_put(&tio->clone);
} }
u64 dm_start_time_ns_from_clone(struct bio *bio)
{
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
struct dm_io *io = tio->io;
return jiffies_to_nsecs(io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
struct bio *bio = io->orig_bio;
io->start_time = bio_start_io_acct(bio);
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
bio->bi_iter.bi_sector, bio_sectors(bio),
false, 0, &io->stats_aux);
}
static void end_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
struct bio *bio = io->orig_bio;
unsigned long duration = jiffies - io->start_time;
bio_end_io_acct(bio, io->start_time);
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
bio->bi_iter.bi_sector, bio_sectors(bio),
true, duration, &io->stats_aux);
/* nudge anyone waiting on suspend queue */
if (unlikely(wq_has_sleeper(&md->wait)))
wake_up(&md->wait);
}
/* /*
* Add the bio to the list of deferred io. * Add the bio to the list of deferred io.
*/ */
@ -992,7 +975,7 @@ static void clone_endio(struct bio *bio)
dm_endio_fn endio = tio->ti->type->end_io; dm_endio_fn endio = tio->ti->type->end_io;
struct bio *orig_bio = io->orig_bio; struct bio *orig_bio = io->orig_bio;
if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) { if (unlikely(error == BLK_STS_TARGET)) {
if (bio_op(bio) == REQ_OP_DISCARD && if (bio_op(bio) == REQ_OP_DISCARD &&
!bio->bi_disk->queue->limits.max_discard_sectors) !bio->bi_disk->queue->limits.max_discard_sectors)
disable_discard(md); disable_discard(md);
@ -1041,32 +1024,28 @@ static void clone_endio(struct bio *bio)
* Return maximum size of I/O possible at the supplied sector up to the current * Return maximum size of I/O possible at the supplied sector up to the current
* target boundary. * target boundary.
*/ */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti) static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
sector_t target_offset)
{ {
sector_t target_offset = dm_target_offset(ti, sector);
return ti->len - target_offset; return ti->len - target_offset;
} }
static sector_t max_io_len(sector_t sector, struct dm_target *ti) static sector_t max_io_len(struct dm_target *ti, sector_t sector)
{ {
sector_t len = max_io_len_target_boundary(sector, ti); sector_t target_offset = dm_target_offset(ti, sector);
sector_t offset, max_len; sector_t len = max_io_len_target_boundary(ti, target_offset);
sector_t max_len;
/* /*
* Does the target need to split even further? * Does the target need to split even further?
* - q->limits.chunk_sectors reflects ti->max_io_len so
* blk_max_size_offset() provides required splitting.
* - blk_max_size_offset() also respects q->limits.max_sectors
*/ */
if (ti->max_io_len) { max_len = blk_max_size_offset(ti->table->md->queue,
offset = dm_target_offset(ti, sector); target_offset);
if (unlikely(ti->max_io_len & (ti->max_io_len - 1))) if (len > max_len)
max_len = sector_div(offset, ti->max_io_len); len = max_len;
else
max_len = offset & (ti->max_io_len - 1);
max_len = ti->max_io_len - max_len;
if (len > max_len)
len = max_len;
}
return len; return len;
} }
@ -1119,7 +1098,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
goto out; goto out;
if (!ti->type->direct_access) if (!ti->type->direct_access)
goto out; goto out;
len = max_io_len(sector, ti) / PAGE_SECTORS; len = max_io_len(ti, sector) / PAGE_SECTORS;
if (len < 1) if (len < 1)
goto out; goto out;
nr_pages = min(len, nr_pages); nr_pages = min(len, nr_pages);
@ -1431,6 +1410,17 @@ static int __send_empty_flush(struct clone_info *ci)
{ {
unsigned target_nr = 0; unsigned target_nr = 0;
struct dm_target *ti; struct dm_target *ti;
struct bio flush_bio;
/*
* Use an on-stack bio for this, it's safe since we don't
* need to reference it after submit. It's just used as
* the basis for the clone(s).
*/
bio_init(&flush_bio, NULL, 0);
flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
ci->bio = &flush_bio;
ci->sector_count = 0;
/* /*
* Empty flush uses a statically initialized bio, as the base for * Empty flush uses a statically initialized bio, as the base for
@ -1444,6 +1434,8 @@ static int __send_empty_flush(struct clone_info *ci)
BUG_ON(bio_has_data(ci->bio)); BUG_ON(bio_has_data(ci->bio));
while ((ti = dm_table_get_target(ci->map, target_nr++))) while ((ti = dm_table_get_target(ci->map, target_nr++)))
__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
bio_uninit(ci->bio);
return 0; return 0;
} }
@ -1466,28 +1458,6 @@ static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
return 0; return 0;
} }
typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
static unsigned get_num_discard_bios(struct dm_target *ti)
{
return ti->num_discard_bios;
}
static unsigned get_num_secure_erase_bios(struct dm_target *ti)
{
return ti->num_secure_erase_bios;
}
static unsigned get_num_write_same_bios(struct dm_target *ti)
{
return ti->num_write_same_bios;
}
static unsigned get_num_write_zeroes_bios(struct dm_target *ti)
{
return ti->num_write_zeroes_bios;
}
static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
unsigned num_bios) unsigned num_bios)
{ {
@ -1502,7 +1472,8 @@ static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *
if (!num_bios) if (!num_bios)
return -EOPNOTSUPP; return -EOPNOTSUPP;
len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); len = min_t(sector_t, ci->sector_count,
max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
__send_duplicate_bios(ci, ti, num_bios, &len); __send_duplicate_bios(ci, ti, num_bios, &len);
@ -1512,26 +1483,6 @@ static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *
return 0; return 0;
} }
static int __send_discard(struct clone_info *ci, struct dm_target *ti)
{
return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti));
}
static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti)
{
return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti));
}
static int __send_write_same(struct clone_info *ci, struct dm_target *ti)
{
return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti));
}
static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti)
{
return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti));
}
static bool is_abnormal_io(struct bio *bio) static bool is_abnormal_io(struct bio *bio)
{ {
bool r = false; bool r = false;
@ -1552,18 +1503,26 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
int *result) int *result)
{ {
struct bio *bio = ci->bio; struct bio *bio = ci->bio;
unsigned num_bios = 0;
if (bio_op(bio) == REQ_OP_DISCARD) switch (bio_op(bio)) {
*result = __send_discard(ci, ti); case REQ_OP_DISCARD:
else if (bio_op(bio) == REQ_OP_SECURE_ERASE) num_bios = ti->num_discard_bios;
*result = __send_secure_erase(ci, ti); break;
else if (bio_op(bio) == REQ_OP_WRITE_SAME) case REQ_OP_SECURE_ERASE:
*result = __send_write_same(ci, ti); num_bios = ti->num_secure_erase_bios;
else if (bio_op(bio) == REQ_OP_WRITE_ZEROES) break;
*result = __send_write_zeroes(ci, ti); case REQ_OP_WRITE_SAME:
else num_bios = ti->num_write_same_bios;
break;
case REQ_OP_WRITE_ZEROES:
num_bios = ti->num_write_zeroes_bios;
break;
default:
return false; return false;
}
*result = __send_changing_extent_only(ci, ti, num_bios);
return true; return true;
} }
@ -1583,7 +1542,7 @@ static int __split_and_process_non_flush(struct clone_info *ci)
if (__process_abnormal_io(ci, ti, &r)) if (__process_abnormal_io(ci, ti, &r))
return r; return r;
len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
if (r < 0) if (r < 0)
@ -1619,19 +1578,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
init_clone_info(&ci, md, map, bio); init_clone_info(&ci, md, map, bio);
if (bio->bi_opf & REQ_PREFLUSH) { if (bio->bi_opf & REQ_PREFLUSH) {
struct bio flush_bio;
/*
* Use an on-stack bio for this, it's safe since we don't
* need to reference it after submit. It's just used as
* the basis for the clone(s).
*/
bio_init(&flush_bio, NULL, 0);
flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
ci.bio = &flush_bio;
ci.sector_count = 0;
error = __send_empty_flush(&ci); error = __send_empty_flush(&ci);
bio_uninit(ci.bio);
/* dec_pending submits any data associated with flush */ /* dec_pending submits any data associated with flush */
} else if (op_is_zone_mgmt(bio_op(bio))) { } else if (op_is_zone_mgmt(bio_op(bio))) {
ci.bio = bio; ci.bio = bio;
@ -1680,88 +1627,6 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
return ret; return ret;
} }
/*
* Optimized variant of __split_and_process_bio that leverages the
* fact that targets that use it do _not_ have a need to split bios.
*/
static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
struct bio *bio, struct dm_target *ti)
{
struct clone_info ci;
blk_qc_t ret = BLK_QC_T_NONE;
int error = 0;
init_clone_info(&ci, md, map, bio);
if (bio->bi_opf & REQ_PREFLUSH) {
struct bio flush_bio;
/*
* Use an on-stack bio for this, it's safe since we don't
* need to reference it after submit. It's just used as
* the basis for the clone(s).
*/
bio_init(&flush_bio, NULL, 0);
flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
ci.bio = &flush_bio;
ci.sector_count = 0;
error = __send_empty_flush(&ci);
bio_uninit(ci.bio);
/* dec_pending submits any data associated with flush */
} else {
struct dm_target_io *tio;
ci.bio = bio;
ci.sector_count = bio_sectors(bio);
if (__process_abnormal_io(&ci, ti, &error))
goto out;
tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
ret = __clone_and_map_simple_bio(&ci, tio, NULL);
}
out:
/* drop the extra reference count */
dec_pending(ci.io, errno_to_blk_status(error));
return ret;
}
static blk_qc_t dm_process_bio(struct mapped_device *md,
struct dm_table *map, struct bio *bio)
{
blk_qc_t ret = BLK_QC_T_NONE;
struct dm_target *ti = md->immutable_target;
if (unlikely(!map)) {
bio_io_error(bio);
return ret;
}
if (!ti) {
ti = dm_table_find_target(map, bio->bi_iter.bi_sector);
if (unlikely(!ti)) {
bio_io_error(bio);
return ret;
}
}
/*
* If in ->submit_bio we need to use blk_queue_split(), otherwise
* queue_limits for abnormal requests (e.g. discard, writesame, etc)
* won't be imposed.
* If called from dm_wq_work() for deferred bio processing, bio
* was already handled by following code with previous ->submit_bio.
*/
if (current->bio_list) {
if (is_abnormal_io(bio))
blk_queue_split(&bio);
/* regular IO is split by __split_and_process_bio */
}
if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
return __process_bio(md, map, bio, ti);
return __split_and_process_bio(md, map, bio);
}
static blk_qc_t dm_submit_bio(struct bio *bio) static blk_qc_t dm_submit_bio(struct bio *bio)
{ {
struct mapped_device *md = bio->bi_disk->private_data; struct mapped_device *md = bio->bi_disk->private_data;
@ -1769,35 +1634,34 @@ static blk_qc_t dm_submit_bio(struct bio *bio)
int srcu_idx; int srcu_idx;
struct dm_table *map; struct dm_table *map;
if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) { map = dm_get_live_table(md, &srcu_idx);
/* if (unlikely(!map)) {
* We are called with a live reference on q_usage_counter, but DMERR_LIMIT("%s: mapping table unavailable, erroring io",
* that one will be released as soon as we return. Grab an dm_device_name(md));
* extra one as blk_mq_submit_bio expects to be able to consume bio_io_error(bio);
* a reference (which lives until the request is freed in case a goto out;
* request is allocated).
*/
percpu_ref_get(&bio->bi_disk->queue->q_usage_counter);
return blk_mq_submit_bio(bio);
} }
map = dm_get_live_table(md, &srcu_idx); /* If suspended, queue this IO for later */
/* if we're suspended, we have to queue this io for later */
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
dm_put_live_table(md, srcu_idx);
if (bio->bi_opf & REQ_NOWAIT) if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio); bio_wouldblock_error(bio);
else if (!(bio->bi_opf & REQ_RAHEAD)) else if (bio->bi_opf & REQ_RAHEAD)
queue_io(md, bio);
else
bio_io_error(bio); bio_io_error(bio);
return ret; else
queue_io(md, bio);
goto out;
} }
ret = dm_process_bio(md, map, bio); /*
* Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc)
* otherwise associated queue_limits won't be imposed.
*/
if (is_abnormal_io(bio))
blk_queue_split(&bio);
ret = __split_and_process_bio(md, map, bio);
out:
dm_put_live_table(md, srcu_idx); dm_put_live_table(md, srcu_idx);
return ret; return ret;
} }
@ -1852,6 +1716,7 @@ static int next_free_minor(int *minor)
} }
static const struct block_device_operations dm_blk_dops; static const struct block_device_operations dm_blk_dops;
static const struct block_device_operations dm_rq_blk_dops;
static const struct dax_operations dm_dax_ops; static const struct dax_operations dm_dax_ops;
static void dm_wq_work(struct work_struct *work); static void dm_wq_work(struct work_struct *work);
@ -2121,12 +1986,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
if (request_based) if (request_based)
dm_stop_queue(q); dm_stop_queue(q);
if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) { if (request_based) {
/* /*
* Leverage the fact that request-based DM targets and * Leverage the fact that request-based DM targets are
* NVMe bio based targets are immutable singletons * immutable singletons - used to optimize dm_mq_queue_rq.
* - used to optimize both dm_request_fn and dm_mq_queue_rq;
* and __process_bio.
*/ */
md->immutable_target = dm_table_get_immutable_target(t); md->immutable_target = dm_table_get_immutable_target(t);
} }
@ -2240,15 +2103,15 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
switch (type) { switch (type) {
case DM_TYPE_REQUEST_BASED: case DM_TYPE_REQUEST_BASED:
md->disk->fops = &dm_rq_blk_dops;
r = dm_mq_init_request_queue(md, t); r = dm_mq_init_request_queue(md, t);
if (r) { if (r) {
DMERR("Cannot initialize queue for request-based dm-mq mapped device"); DMERR("Cannot initialize queue for request-based dm mapped device");
return r; return r;
} }
break; break;
case DM_TYPE_BIO_BASED: case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED: case DM_TYPE_DAX_BIO_BASED:
case DM_TYPE_NVME_BIO_BASED:
break; break;
case DM_TYPE_NONE: case DM_TYPE_NONE:
WARN_ON_ONCE(true); WARN_ON_ONCE(true);
@ -2453,29 +2316,19 @@ static int dm_wait_for_completion(struct mapped_device *md, long task_state)
*/ */
static void dm_wq_work(struct work_struct *work) static void dm_wq_work(struct work_struct *work)
{ {
struct mapped_device *md = container_of(work, struct mapped_device, struct mapped_device *md = container_of(work, struct mapped_device, work);
work); struct bio *bio;
struct bio *c;
int srcu_idx;
struct dm_table *map;
map = dm_get_live_table(md, &srcu_idx);
while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
spin_lock_irq(&md->deferred_lock); spin_lock_irq(&md->deferred_lock);
c = bio_list_pop(&md->deferred); bio = bio_list_pop(&md->deferred);
spin_unlock_irq(&md->deferred_lock); spin_unlock_irq(&md->deferred_lock);
if (!c) if (!bio)
break; break;
if (dm_request_based(md)) submit_bio_noacct(bio);
(void) submit_bio_noacct(c);
else
(void) dm_process_bio(md, map, c);
} }
dm_put_live_table(md, srcu_idx);
} }
static void dm_queue_flush(struct mapped_device *md) static void dm_queue_flush(struct mapped_device *md)
@ -2612,13 +2465,12 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
/* /*
* Here we must make sure that no processes are submitting requests * Here we must make sure that no processes are submitting requests
* to target drivers i.e. no one may be executing * to target drivers i.e. no one may be executing
* __split_and_process_bio. This is called from dm_request and * __split_and_process_bio from dm_submit_bio.
* dm_wq_work.
* *
* To get all processes out of __split_and_process_bio in dm_request, * To get all processes out of __split_and_process_bio in dm_submit_bio,
* we take the write lock. To prevent any process from reentering * we take the write lock. To prevent any process from reentering
* __split_and_process_bio from dm_request and quiesce the thread * __split_and_process_bio from dm_submit_bio and quiesce the thread
* (dm_wq_work), we set BMF_BLOCK_IO_FOR_SUSPEND and call * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
* flush_workqueue(md->wq). * flush_workqueue(md->wq).
*/ */
set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
@ -2986,19 +2838,19 @@ int dm_test_deferred_remove_flag(struct mapped_device *md)
int dm_suspended(struct dm_target *ti) int dm_suspended(struct dm_target *ti)
{ {
return dm_suspended_md(dm_table_get_md(ti->table)); return dm_suspended_md(ti->table->md);
} }
EXPORT_SYMBOL_GPL(dm_suspended); EXPORT_SYMBOL_GPL(dm_suspended);
int dm_post_suspending(struct dm_target *ti) int dm_post_suspending(struct dm_target *ti)
{ {
return dm_post_suspending_md(dm_table_get_md(ti->table)); return dm_post_suspending_md(ti->table->md);
} }
EXPORT_SYMBOL_GPL(dm_post_suspending); EXPORT_SYMBOL_GPL(dm_post_suspending);
int dm_noflush_suspending(struct dm_target *ti) int dm_noflush_suspending(struct dm_target *ti)
{ {
return __noflush_suspending(dm_table_get_md(ti->table)); return __noflush_suspending(ti->table->md);
} }
EXPORT_SYMBOL_GPL(dm_noflush_suspending); EXPORT_SYMBOL_GPL(dm_noflush_suspending);
@ -3017,7 +2869,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
switch (type) { switch (type) {
case DM_TYPE_BIO_BASED: case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED: case DM_TYPE_DAX_BIO_BASED:
case DM_TYPE_NVME_BIO_BASED:
pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size); pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio); io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
@ -3235,6 +3086,15 @@ static const struct block_device_operations dm_blk_dops = {
.owner = THIS_MODULE .owner = THIS_MODULE
}; };
static const struct block_device_operations dm_rq_blk_dops = {
.open = dm_blk_open,
.release = dm_blk_close,
.ioctl = dm_blk_ioctl,
.getgeo = dm_blk_getgeo,
.pr_ops = &dm_pr_ops,
.owner = THIS_MODULE
};
static const struct dax_operations dm_dax_ops = { static const struct dax_operations dm_dax_ops = {
.direct_access = dm_dax_direct_access, .direct_access = dm_dax_direct_access,
.dax_supported = dm_dax_supported, .dax_supported = dm_dax_supported,


@ -179,12 +179,9 @@ int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred); int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
int dm_cancel_deferred_remove(struct mapped_device *md); int dm_cancel_deferred_remove(struct mapped_device *md);
int dm_request_based(struct mapped_device *md); int dm_request_based(struct mapped_device *md);
sector_t dm_get_size(struct mapped_device *md);
struct request_queue *dm_get_md_queue(struct mapped_device *md);
int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
struct dm_dev **result); struct dm_dev **result);
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d); void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
struct dm_stats *dm_get_stats(struct mapped_device *md);
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
unsigned cookie); unsigned cookie);


@ -366,7 +366,8 @@ static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
} while (!(flags & LEAF_NODE)); } while (!(flags & LEAF_NODE));
*result_key = le64_to_cpu(ro_node(s)->keys[i]); *result_key = le64_to_cpu(ro_node(s)->keys[i]);
memcpy(v, value_ptr(ro_node(s), i), value_size); if (v)
memcpy(v, value_ptr(ro_node(s), i), value_size);
return 0; return 0;
} }


@ -29,7 +29,6 @@ enum dm_queue_mode {
DM_TYPE_BIO_BASED = 1, DM_TYPE_BIO_BASED = 1,
DM_TYPE_REQUEST_BASED = 2, DM_TYPE_REQUEST_BASED = 2,
DM_TYPE_DAX_BIO_BASED = 3, DM_TYPE_DAX_BIO_BASED = 3,
DM_TYPE_NVME_BIO_BASED = 4,
}; };
typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;


@ -272,9 +272,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4 #define DM_VERSION_MAJOR 4
#define DM_VERSION_MINOR 42 #define DM_VERSION_MINOR 43
#define DM_VERSION_PATCHLEVEL 0 #define DM_VERSION_PATCHLEVEL 0
#define DM_VERSION_EXTRA "-ioctl (2020-02-27)" #define DM_VERSION_EXTRA "-ioctl (2020-10-01)"
/* Status bits */ /* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */