libnvdimm: nd_region flush callback support

This patch adds the ability to perform a flush from guest to host
over VIRTIO. A flush callback is registered based on the 'nd_region'
type: the virtio_pmem driver requires this special flush function,
while all other region types keep the existing flush path. An error
returned by a failed host fsync is now reported back to userspace.

Signed-off-by: Pankaj Gupta <pagupta@redhat.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
commit c5d4355d10 (parent db5d00c93e)
Author: Pankaj Gupta, 2019-07-05 19:33:22 +05:30; committed by Dan Williams
6 changed files, 47 insertions(+), 12 deletions(-)
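
For orientation, here is a minimal sketch of how a provider such as
virtio_pmem is expected to hook into this interface. Only
ndr_desc->flush and ND_REGION_ASYNC come from this patch; the
example_* names and error handling are hypothetical scaffolding, not
part of the series.

static int example_flush(struct nd_region *nd_region, struct bio *bio)
{
	/* ask the host to fsync the region's backing file; nonzero on failure */
	return example_host_fsync(nd_region_provider_data(nd_region));
}

static int example_register_region(struct nvdimm_bus *bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->flush = example_flush;		/* callback added by this patch */
	set_bit(ND_REGION_ASYNC, &ndr_desc->flags);	/* flag added by this patch */

	if (!nvdimm_pmem_region_create(bus, ndr_desc))
		return -ENXIO;
	return 0;
}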

diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c

@@ -2426,7 +2426,7 @@ static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
 		offset = to_interleave_offset(offset, mmio);
 
 	writeq(cmd, mmio->addr.base + offset);
-	nvdimm_flush(nfit_blk->nd_region);
+	nvdimm_flush(nfit_blk->nd_region, NULL);
 
 	if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
 		readq(mmio->addr.base + offset);
@@ -2475,7 +2475,7 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
 	}
 
 	if (rw)
-		nvdimm_flush(nfit_blk->nd_region);
+		nvdimm_flush(nfit_blk->nd_region, NULL);
 
 	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
 	return rc;

diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c

@@ -255,7 +255,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
 	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
 	unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
 	sector_t sector = offset >> 9;
-	int rc = 0;
+	int rc = 0, ret = 0;
 
 	if (unlikely(!size))
 		return 0;
@@ -293,7 +293,9 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
 	}
 
 	memcpy_flushcache(nsio->addr + offset, buf, size);
-	nvdimm_flush(to_nd_region(ndns->dev.parent));
+	ret = nvdimm_flush(to_nd_region(ndns->dev.parent), NULL);
+	if (ret)
+		rc = ret;
 
 	return rc;
 }

diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h

@@ -155,6 +155,7 @@ struct nd_region {
 	struct badblocks bb;
 	struct nd_interleave_set *nd_set;
 	struct nd_percpu_lane __percpu *lane;
+	int (*flush)(struct nd_region *nd_region, struct bio *bio);
 	struct nd_mapping mapping[0];
 };

diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c

@@ -184,6 +184,7 @@ static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
 
 static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 {
+	int ret = 0;
 	blk_status_t rc = 0;
 	bool do_acct;
 	unsigned long start;
@@ -193,7 +194,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 	struct nd_region *nd_region = to_region(pmem);
 
 	if (bio->bi_opf & REQ_PREFLUSH)
-		nvdimm_flush(nd_region);
+		ret = nvdimm_flush(nd_region, bio);
 
 	do_acct = nd_iostat_start(bio, &start);
 	bio_for_each_segment(bvec, bio, iter) {
@@ -208,7 +209,10 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 		nd_iostat_end(bio, start);
 
 	if (bio->bi_opf & REQ_FUA)
-		nvdimm_flush(nd_region);
+		ret = nvdimm_flush(nd_region, bio);
+
+	if (ret)
+		bio->bi_status = errno_to_blk_status(ret);
 
 	bio_endio(bio);
 	return BLK_QC_T_NONE;
@@ -477,7 +481,6 @@ static int pmem_attach_disk(struct device *dev,
 	}
 	dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
 	pmem->dax_dev = dax_dev;
-
 	gendev = disk_to_dev(disk);
 	gendev->groups = pmem_attribute_groups;
 
@@ -535,14 +538,14 @@ static int nd_pmem_remove(struct device *dev)
 		sysfs_put(pmem->bb_state);
 		pmem->bb_state = NULL;
 	}
-	nvdimm_flush(to_nd_region(dev->parent));
+	nvdimm_flush(to_nd_region(dev->parent), NULL);
 
 	return 0;
 }
 
 static void nd_pmem_shutdown(struct device *dev)
 {
-	nvdimm_flush(to_nd_region(dev->parent));
+	nvdimm_flush(to_nd_region(dev->parent), NULL);
 }
 
 static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
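
The pmem changes above are what make a flush failure visible to
userspace: a nonzero return from nvdimm_flush() is converted with
errno_to_blk_status() and the bio completes with an error. A hedged
userspace illustration (the mount point and file are made up):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/pmem0/data", O_RDWR);	/* hypothetical path */

	if (fd < 0)
		return 1;
	if (fsync(fd) < 0)		/* a failed host flush surfaces here as EIO */
		perror("fsync");
	close(fd);
	return 0;
}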

diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c

@@ -287,7 +287,9 @@ static ssize_t deep_flush_store(struct device *dev, struct device_attribute *att
 		return rc;
 	if (!flush)
 		return -EINVAL;
-	nvdimm_flush(nd_region);
+	rc = nvdimm_flush(nd_region, NULL);
+	if (rc)
+		return rc;
 
 	return len;
 }
@@ -1077,6 +1079,11 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
 	dev->of_node = ndr_desc->of_node;
 	nd_region->ndr_size = resource_size(ndr_desc->res);
 	nd_region->ndr_start = ndr_desc->res->start;
+	if (ndr_desc->flush)
+		nd_region->flush = ndr_desc->flush;
+	else
+		nd_region->flush = NULL;
+
 	nd_device_register(dev);
 
 	return nd_region;
@@ -1117,11 +1124,24 @@ struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
 }
 EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
 
+int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
+{
+	int rc = 0;
+
+	if (!nd_region->flush)
+		rc = generic_nvdimm_flush(nd_region);
+	else {
+		if (nd_region->flush(nd_region, bio))
+			rc = -EIO;
+	}
+
+	return rc;
+}
 /**
  * nvdimm_flush - flush any posted write queues between the cpu and pmem media
  * @nd_region: blk or interleaved pmem region
  */
-void nvdimm_flush(struct nd_region *nd_region)
+int generic_nvdimm_flush(struct nd_region *nd_region)
 {
 	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
 	int i, idx;
@@ -1145,6 +1165,8 @@ void nvdimm_flush(struct nd_region *nd_region)
 		if (ndrd_get_flush_wpq(ndrd, i, 0))
 			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
 	wmb();
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(nvdimm_flush);
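
The wrapper above fixes the callback contract: a region with no
registered callback takes the generic WPQ path, and any nonzero
return from a registered callback is collapsed to -EIO. A hypothetical
callback for illustration (my_flush and my_host_request_failed are
made-up names):

static int my_flush(struct nd_region *nd_region, struct bio *bio)
{
	if (my_host_request_failed())	/* hypothetical failure check */
		return -ENXIO;		/* nvdimm_flush() reports this as -EIO */
	return 0;
}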

diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h

@@ -11,6 +11,7 @@
 #include <linux/types.h>
 #include <linux/uuid.h>
 #include <linux/spinlock.h>
+#include <linux/bio.h>
 
 struct badrange_entry {
 	u64 start;
@@ -57,6 +58,9 @@ enum {
 	 */
 	ND_REGION_PERSIST_MEMCTRL = 2,
 
+	/* Platform provides asynchronous flush mechanism */
+	ND_REGION_ASYNC = 3,
+
 	/* mark newly adjusted resources as requiring a label update */
 	DPA_RESOURCE_ADJUSTED = 1 << 0,
 };
@@ -113,6 +117,7 @@ struct nd_mapping_desc {
 	int position;
 };
 
+struct nd_region;
 struct nd_region_desc {
 	struct resource *res;
 	struct nd_mapping_desc *mapping;
@@ -125,6 +130,7 @@ struct nd_region_desc {
 	int target_node;
 	unsigned long flags;
 	struct device_node *of_node;
+	int (*flush)(struct nd_region *nd_region, struct bio *bio);
 };
 
 struct device;
@@ -252,7 +258,8 @@ unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr);
 unsigned int nd_region_acquire_lane(struct nd_region *nd_region);
 void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane);
 u64 nd_fletcher64(void *addr, size_t len, bool le);
-void nvdimm_flush(struct nd_region *nd_region);
+int nvdimm_flush(struct nd_region *nd_region, struct bio *bio);
+int generic_nvdimm_flush(struct nd_region *nd_region);
 int nvdimm_has_flush(struct nd_region *nd_region);
 int nvdimm_has_cache(struct nd_region *nd_region);
 int nvdimm_in_overwrite(struct nvdimm *nvdimm);
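
Taken together, the call sites in this patch follow two conventions
under the new signature, summarized here for reference:

/* no request context: nfit block I/O, deep_flush sysfs, remove, shutdown */
nvdimm_flush(nd_region, NULL);

/* request context: pmem_make_request passes the in-flight bio, presumably
 * so an asynchronous provider can tie the flush to that request */
ret = nvdimm_flush(nd_region, bio);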