Merge tag 'for-4.18/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fix from Mike Snitzer:
 "Fix DM writecache target to allow an optional offset to the start of
  the data and metadata area. This allows userspace tools (e.g. LVM2)
  to place a header and metadata at the front of the writecache device
  for its use"

* tag 'for-4.18/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm writecache: support optional offset for start of device
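For illustration only, a device-mapper table using the new optional parameter could look like the line below; the "wcache" name, the device paths, the 419430400-sector target length, and the 2048-sector offset are made-up example values, and the "2" is the optional-parameter count (start_sector and its argument count as two):

    dmsetup create wcache --table "0 419430400 writecache s /dev/mapper/origin /dev/mapper/fast 4096 2 start_sector 2048"

With start_sector 2048, the first 1 MiB (2048 * 512 bytes) of the cache device is left untouched for a userspace-managed header, and the writecache metadata and data area begin at that offset.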
commit b4460a9586
--- a/Documentation/device-mapper/writecache.txt
+++ b/Documentation/device-mapper/writecache.txt
@@ -15,6 +15,8 @@ Constructor parameters:
    size)
 5. the number of optional parameters (the parameters with an argument
    count as two)
+	start_sector n		(default: 0)
+		offset from the start of cache device in 512-byte sectors
 	high_watermark n	(default: 50)
 		start writeback when the number of used blocks reach this
 		watermark
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -136,6 +136,7 @@ struct dm_writecache {
 	struct dm_target *ti;
 	struct dm_dev *dev;
 	struct dm_dev *ssd_dev;
+	sector_t start_sector;
 	void *memory_map;
 	uint64_t memory_map_size;
 	size_t metadata_sectors;
@@ -293,6 +294,10 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 	}

 	dax_read_unlock(id);
+
+	wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
+	wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;
+
 	return 0;
 err3:
 	kvfree(pages);
@@ -311,7 +316,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 static void persistent_memory_release(struct dm_writecache *wc)
 {
 	if (wc->memory_vmapped)
-		vunmap(wc->memory_map);
+		vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
 }

 static struct page *persistent_memory_page(void *addr)
@@ -359,7 +364,7 @@ static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)

 static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
 {
-	return wc->metadata_sectors +
+	return wc->start_sector + wc->metadata_sectors +
 	       ((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
 }

@@ -471,6 +476,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc)
 		if (unlikely(region.sector + region.count > wc->metadata_sectors))
 			region.count = wc->metadata_sectors - region.sector;

+		region.sector += wc->start_sector;
 		atomic_inc(&endio.count);
 		req.bi_op = REQ_OP_WRITE;
 		req.bi_op_flags = REQ_SYNC;
@@ -1946,14 +1952,6 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	}
 	wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);

-	if (WC_MODE_PMEM(wc)) {
-		r = persistent_memory_claim(wc);
-		if (r) {
-			ti->error = "Unable to map persistent memory for cache";
-			goto bad;
-		}
-	}
-
 	/*
 	 * Parse the cache block size
 	 */
@@ -1982,7 +1980,16 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)

 	while (opt_params) {
 		string = dm_shift_arg(&as), opt_params--;
-		if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
+		if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
+			unsigned long long start_sector;
+			string = dm_shift_arg(&as), opt_params--;
+			if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
+				goto invalid_optional;
+			wc->start_sector = start_sector;
+			if (wc->start_sector != start_sector ||
+			    wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
+				goto invalid_optional;
+		} else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
 			string = dm_shift_arg(&as), opt_params--;
 			if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1)
 				goto invalid_optional;
|
@ -2039,12 +2046,20 @@ invalid_optional:
|
|||
goto bad;
|
||||
}
|
||||
|
||||
if (!WC_MODE_PMEM(wc)) {
|
||||
if (WC_MODE_PMEM(wc)) {
|
||||
r = persistent_memory_claim(wc);
|
||||
if (r) {
|
||||
ti->error = "Unable to map persistent memory for cache";
|
||||
goto bad;
|
||||
}
|
||||
} else {
|
||||
struct dm_io_region region;
|
||||
struct dm_io_request req;
|
||||
size_t n_blocks, n_metadata_blocks;
|
||||
uint64_t n_bitmap_bits;
|
||||
|
||||
wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;
|
||||
|
||||
bio_list_init(&wc->flush_list);
|
||||
wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush");
|
||||
if (IS_ERR(wc->flush_thread)) {
|
||||
|
@@ -2097,7 +2112,7 @@ invalid_optional:
 	}

 	region.bdev = wc->ssd_dev->bdev;
-	region.sector = 0;
+	region.sector = wc->start_sector;
 	region.count = wc->metadata_sectors;
 	req.bi_op = REQ_OP_READ;
 	req.bi_op_flags = REQ_SYNC;
@@ -2265,7 +2280,7 @@ static void writecache_status(struct dm_target *ti, status_type_t type,

 static struct target_type writecache_target = {
 	.name			= "writecache",
-	.version		= {1, 0, 0},
+	.version		= {1, 1, 0},
 	.module			= THIS_MODULE,
 	.ctr			= writecache_ctr,
 	.dtr			= writecache_dtr,