Merge tag 'for-4.17/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - a stable fix for DM integrity to use kvfree

 - fix for a 4.17-rc1 change to dm-bufio's buffer alignment

 - fixes for a few sparse warnings

 - remove VLA usage in DM mirror target

 - improve DM thinp Documentation for the "read_only" feature

* tag 'for-4.17/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm thin: update Documentation to clarify when "read_only" is valid
  dm mirror: remove VLA usage
  dm: fix some sparse warnings and whitespace in dax methods
  dm cache background tracker: fix sparse warning
  dm bufio: fix buffer alignment
  dm integrity: use kvfree for kvmalloc'd memory
commit 94d7dbf108
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -264,7 +264,10 @@ i) Constructor
     data device, but just remove the mapping.
 
 read_only: Don't allow any changes to be made to the pool
-           metadata.
+           metadata.  This mode is only available after the
+           thin-pool has been created and first used in full
+           read/write mode.  It cannot be specified on initial
+           thin-pool creation.
 
 error_if_no_space: Error IOs, instead of queueing, if no space.
 
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1681,8 +1681,9 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
 
        if (block_size <= KMALLOC_MAX_SIZE &&
            (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
-               snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", c->block_size);
-               c->slab_cache = kmem_cache_create(slab_name, c->block_size, ARCH_KMALLOC_MINALIGN,
+               unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE);
+               snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
+               c->slab_cache = kmem_cache_create(slab_name, block_size, align,
                                                  SLAB_RECLAIM_ACCOUNT, NULL);
                if (!c->slab_cache) {
                        r = -ENOMEM;
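The alignment expression above picks the largest power of two that divides block_size, capped at the page size, so buffers for non-power-of-two block sizes still land on naturally aligned boundaries. A minimal userspace sketch of that computation, using __builtin_ctz as a stand-in for the kernel's __ffs() and an assumed 4096-byte PAGE_SIZE:

#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned bufio_align(unsigned block_size)
{
        /* 1U << (index of lowest set bit) == largest power-of-2 divisor */
        unsigned align = 1u << __builtin_ctz(block_size);

        return align < PAGE_SIZE ? align : PAGE_SIZE;
}

int main(void)
{
        /* 512-byte blocks align to 512, 1536 (3 * 512) also aligns to
         * 512, and anything page-sized or larger caps at PAGE_SIZE. */
        printf("%u %u %u\n", bufio_align(512), bufio_align(1536),
               bufio_align(8192));
        return 0;
}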
--- a/drivers/md/dm-cache-background-tracker.c
+++ b/drivers/md/dm-cache-background-tracker.c
@@ -166,7 +166,7 @@ static bool max_work_reached(struct background_tracker *b)
                atomic_read(&b->pending_demotes) >= b->max_work;
 }
 
-struct bt_work *alloc_work(struct background_tracker *b)
+static struct bt_work *alloc_work(struct background_tracker *b)
 {
        if (max_work_reached(b))
                return NULL;
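The one-word change above silences sparse's warning "symbol 'alloc_work' was not declared. Should it be static?": a function used only within its own file should have internal linkage so it neither pollutes the global symbol table nor hides a missing header declaration. A trivial standalone illustration of the same idiom, with a made-up helper name:

#include <stdio.h>

/* 'helper' is a made-up stand-in for alloc_work(): file-local, so it
 * gets internal linkage and sparse no longer asks for a prototype. */
static int helper(int x)
{
        return x * 2;
}

int main(void)
{
        printf("%d\n", helper(21));
        return 0;
}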
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2440,7 +2440,7 @@ static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, str
        unsigned i;
        for (i = 0; i < ic->journal_sections; i++)
                kvfree(sl[i]);
-       kfree(sl);
+       kvfree(sl);
 }
 
 static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl)
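kvmalloc() can satisfy an allocation from kmalloc or, for larger sizes, fall back to vmalloc, so the pointer must be returned through kvfree(), which dispatches on the address (via is_vmalloc_addr()); plain kfree() is only correct for the kmalloc case, which is the bug this hunk fixes. A userspace sketch of the same "allocator and free routine must pair" discipline, with a tag word modeling the dispatch; all names here are made up:

#include <stdio.h>
#include <stdlib.h>

enum pool { POOL_SMALL, POOL_LARGE };

static void *model_kvmalloc(size_t size)
{
        /* Tag each allocation with the pool it came from, the way
         * kvfree() can later tell kmalloc'd from vmalloc'd memory. */
        enum pool *p = malloc(sizeof(enum pool) + size);

        if (!p)
                return NULL;
        *p = size <= 4096 ? POOL_SMALL : POOL_LARGE;
        return p + 1;
}

static void model_kvfree(void *ptr)
{
        enum pool *p = (enum pool *)ptr - 1;

        /* Dispatch on the tag; freeing through the wrong routine is
         * the class of bug the dm-integrity patch above fixes. */
        printf("freeing from %s pool\n",
               *p == POOL_SMALL ? "small" : "large");
        free(p);
}

int main(void)
{
        void *p = model_kvmalloc(16384);

        model_kvfree(p);        /* always pair kvmalloc() with kvfree() */
        return 0;
}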
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -23,6 +23,8 @@
 
 #define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */
 
+#define MAX_NR_MIRRORS (DM_KCOPYD_MAX_REGIONS + 1)
+
 #define DM_RAID1_HANDLE_ERRORS 0x01
 #define DM_RAID1_KEEP_LOG 0x02
 #define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS)
@@ -255,7 +257,7 @@ static int mirror_flush(struct dm_target *ti)
        unsigned long error_bits;
 
        unsigned int i;
-       struct dm_io_region io[ms->nr_mirrors];
+       struct dm_io_region io[MAX_NR_MIRRORS];
        struct mirror *m;
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_WRITE,
@@ -651,7 +653,7 @@ static void write_callback(unsigned long error, void *context)
 static void do_write(struct mirror_set *ms, struct bio *bio)
 {
        unsigned int i;
-       struct dm_io_region io[ms->nr_mirrors], *dest = io;
+       struct dm_io_region io[MAX_NR_MIRRORS], *dest = io;
        struct mirror *m;
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_WRITE,
@@ -1083,7 +1085,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        argc -= args_used;
 
        if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
-           nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
+           nr_mirrors < 2 || nr_mirrors > MAX_NR_MIRRORS) {
                ti->error = "Invalid number of mirrors";
                dm_dirty_log_destroy(dl);
                return -EINVAL;
@@ -1404,7 +1406,7 @@ static void mirror_status(struct dm_target *ti, status_type_t type,
        int num_feature_args = 0;
        struct mirror_set *ms = (struct mirror_set *) ti->private;
        struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
-       char buffer[ms->nr_mirrors + 1];
+       char buffer[MAX_NR_MIRRORS + 1];
 
        switch (type) {
        case STATUSTYPE_INFO:
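All four dm-raid1 hunks above replace variable-length arrays sized by the runtime ms->nr_mirrors with fixed arrays sized by the compile-time bound MAX_NR_MIRRORS, which mirror_ctr() already enforces when parsing the table line, so no behavior changes and stack usage becomes statically known. A minimal sketch of that pattern, with made-up names standing in for MAX_NR_MIRRORS and struct dm_io_region:

#include <stdio.h>

#define MAX_ITEMS 9     /* made-up compile-time upper bound */

struct region {
        unsigned long sector;
        unsigned long count;
};

static int process(unsigned nr_items)
{
        /* Fixed-size array: unlike `struct region io[nr_items]`, the
         * stack footprint is known at compile time. */
        struct region io[MAX_ITEMS];
        unsigned i;

        if (nr_items > MAX_ITEMS)       /* validated up front, as in the ctr */
                return -1;

        for (i = 0; i < nr_items; i++) {
                io[i].sector = i * 8;
                io[i].count = 8;
        }
        printf("processed %u regions\n", nr_items);
        return 0;
}

int main(void)
{
        return process(3) ? 1 : 0;
}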
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1020,7 +1020,8 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
 
 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
-               sector_t sector, int *srcu_idx)
+                                                sector_t sector, int *srcu_idx)
+       __acquires(md->io_barrier)
 {
        struct dm_table *map;
        struct dm_target *ti;
@@ -1037,7 +1038,7 @@ static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
 }
 
 static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
-               long nr_pages, void **kaddr, pfn_t *pfn)
+                                long nr_pages, void **kaddr, pfn_t *pfn)
 {
        struct mapped_device *md = dax_get_private(dax_dev);
        sector_t sector = pgoff * PAGE_SECTORS;
@@ -1065,7 +1066,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
 }
 
 static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
-               void *addr, size_t bytes, struct iov_iter *i)
+                                   void *addr, size_t bytes, struct iov_iter *i)
 {
        struct mapped_device *md = dax_get_private(dax_dev);
        sector_t sector = pgoff * PAGE_SECTORS;
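The __acquires(md->io_barrier) annotation in the dm.c hunks tells sparse that dm_dax_get_live_target() intentionally returns with the SRCU read section entered, so the matching release in the callers (dm_put_live_table()) is not reported as a context imbalance. A compilable sketch of how such annotations are typically defined and used, with a pthread mutex and made-up function names standing in for the SRCU barrier; the macro definitions mirror the kernel's, which expand to sparse context attributes only under __CHECKER__:

#include <pthread.h>
#include <stdio.h>

/* Under sparse (__CHECKER__), these become context-tracking
 * attributes; under a normal compiler they compile away. */
#ifdef __CHECKER__
# define __acquires(x)  __attribute__((context(x, 0, 1)))
# define __releases(x)  __attribute__((context(x, 1, 0)))
#else
# define __acquires(x)
# define __releases(x)
#endif

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns with table_lock held, the way dm_dax_get_live_target()
 * returns inside the md->io_barrier SRCU read section. */
static int lookup_locked(void) __acquires(table_lock)
{
        pthread_mutex_lock(&table_lock);
        return 42;      /* caller must release */
}

static void put_locked(void) __releases(table_lock)
{
        pthread_mutex_unlock(&table_lock);
}

int main(void)
{
        int v = lookup_locked();

        printf("%d\n", v);
        put_locked();
        return 0;
}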