dax: introduce DAX_RECOVERY_WRITE dax access mode
Up till now, dax_direct_access() is used implicitly for normal access, but for the purpose of recovery write, dax range with poison is requested. To make the interface clear, introduce

    enum dax_access_mode {
        DAX_ACCESS,
        DAX_RECOVERY_WRITE,
    }

where DAX_ACCESS is used for normal dax access, and DAX_RECOVERY_WRITE is used for dax recovery write.

Suggested-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jane Chu <jane.chu@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Mike Snitzer <snitzer@redhat.com>
Reviewed-by: Vivek Goyal <vgoyal@redhat.com>
Link: https://lore.kernel.org/r/165247982851.52965.11024212198889762949.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
commit e511c4a3d2
parent 5898b43af9
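The new mode argument only conveys the caller's intent; each dax_operations provider decides how to honor it. The sketch below is not part of this commit: it is a minimal illustration of how a driver could branch on the mode, where struct foo_device and the foo_*() helpers are hypothetical placeholders.

/* Illustrative sketch only -- not code from this commit. */
#include <linux/dax.h>
#include <linux/pfn_t.h>

struct foo_device;      /* hypothetical per-device state */

/*
 * Hypothetical helpers: the normal translation fails on poisoned pages,
 * while the recovery variant hands back a mapping anyway so the caller
 * can overwrite (and thereby clear) the poisoned range.
 */
long foo_translate(struct foo_device *foo, pgoff_t pgoff, long nr_pages,
                   void **kaddr, pfn_t *pfn);
long foo_translate_despite_poison(struct foo_device *foo, pgoff_t pgoff,
                   long nr_pages, void **kaddr, pfn_t *pfn);

static long foo_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode mode, void **kaddr,
                pfn_t *pfn)
{
        struct foo_device *foo = dax_get_private(dax_dev);

        if (mode == DAX_RECOVERY_WRITE)
                /* caller intends to overwrite a poisoned range */
                return foo_translate_despite_poison(foo, pgoff, nr_pages,
                                                    kaddr, pfn);

        /* DAX_ACCESS: ordinary dax_direct_access() behavior */
        return foo_translate(foo, pgoff, nr_pages, kaddr, pfn);
}

static const struct dax_operations foo_dax_ops = {
        .direct_access  = foo_dax_direct_access,
};

Callers that are not performing recovery simply pass DAX_ACCESS, as every existing call site converted in the diff below does.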
@@ -117,6 +117,7 @@ enum dax_device_flags {
  * @dax_dev: a dax_device instance representing the logical memory range
  * @pgoff: offset in pages from the start of the device to translate
  * @nr_pages: number of consecutive pages caller can handle relative to @pfn
+ * @mode: indicator on normal access or recovery write
  * @kaddr: output parameter that returns a virtual address mapping of pfn
  * @pfn: output parameter that returns an absolute pfn translation of @pgoff
  *
@@ -124,7 +125,7 @@ enum dax_device_flags {
  * pages accessible at the device relative @pgoff.
  */
 long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
-                void **kaddr, pfn_t *pfn)
+                enum dax_access_mode mode, void **kaddr, pfn_t *pfn)
 {
         long avail;
 
@@ -138,7 +139,7 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
                 return -EINVAL;
 
         avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
-                        kaddr, pfn);
+                        mode, kaddr, pfn);
         if (!avail)
                 return -ERANGE;
         return min(avail, nr_pages);
@@ -172,11 +172,12 @@ static struct dax_device *linear_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff)
 }
 
 static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
-                long nr_pages, void **kaddr, pfn_t *pfn)
+                long nr_pages, enum dax_access_mode mode, void **kaddr,
+                pfn_t *pfn)
 {
         struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);
 
-        return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
+        return dax_direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn);
 }
 
 static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
@@ -889,11 +889,12 @@ static struct dax_device *log_writes_dax_pgoff(struct dm_target *ti,
 }
 
 static long log_writes_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
-                long nr_pages, void **kaddr, pfn_t *pfn)
+                long nr_pages, enum dax_access_mode mode, void **kaddr,
+                pfn_t *pfn)
 {
         struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff);
 
-        return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
+        return dax_direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn);
 }
 
 static int log_writes_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
@@ -315,11 +315,12 @@ static struct dax_device *stripe_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff)
 }
 
 static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
-                long nr_pages, void **kaddr, pfn_t *pfn)
+                long nr_pages, enum dax_access_mode mode, void **kaddr,
+                pfn_t *pfn)
 {
         struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff);
 
-        return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
+        return dax_direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn);
 }
 
 static int stripe_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
@@ -10,6 +10,7 @@
 #include <linux/init.h>
 #include <linux/kmod.h>
 #include <linux/bio.h>
+#include <linux/dax.h>
 
 #define DM_MSG_PREFIX "target"
 
@@ -142,7 +143,8 @@ static void io_err_release_clone_rq(struct request *clone,
 }
 
 static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
-                long nr_pages, void **kaddr, pfn_t *pfn)
+                long nr_pages, enum dax_access_mode mode, void **kaddr,
+                pfn_t *pfn)
 {
         return -EIO;
 }
@@ -286,7 +286,8 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 
         id = dax_read_lock();
 
-        da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn);
+        da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, DAX_ACCESS,
+                        &wc->memory_map, &pfn);
         if (da < 0) {
                 wc->memory_map = NULL;
                 r = da;
@@ -308,8 +309,8 @@ static int persistent_memory_claim(struct dm_writecache *wc)
                 i = 0;
                 do {
                         long daa;
-                        daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i,
-                                                NULL, &pfn);
+                        daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i,
+                                                p - i, DAX_ACCESS, NULL, &pfn);
                         if (daa <= 0) {
                                 r = daa ? daa : -EINVAL;
                                 goto err3;
@@ -1093,7 +1093,8 @@ static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
 }
 
 static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
-                long nr_pages, void **kaddr, pfn_t *pfn)
+                long nr_pages, enum dax_access_mode mode, void **kaddr,
+                pfn_t *pfn)
 {
         struct mapped_device *md = dax_get_private(dax_dev);
         sector_t sector = pgoff * PAGE_SECTORS;
@@ -1111,7 +1112,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
         if (len < 1)
                 goto out;
         nr_pages = min(len, nr_pages);
-        ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
+        ret = ti->type->direct_access(ti, pgoff, nr_pages, mode, kaddr, pfn);
 
  out:
         dm_put_live_table(md, srcu_idx);
@@ -239,7 +239,8 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
 
 /* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
 __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
-                long nr_pages, void **kaddr, pfn_t *pfn)
+                long nr_pages, enum dax_access_mode mode, void **kaddr,
+                pfn_t *pfn)
 {
         resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
 
@@ -278,11 +279,12 @@ static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
 }
 
 static long pmem_dax_direct_access(struct dax_device *dax_dev,
-                pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
+                pgoff_t pgoff, long nr_pages, enum dax_access_mode mode,
+                void **kaddr, pfn_t *pfn)
 {
         struct pmem_device *pmem = dax_get_private(dax_dev);
 
-        return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
+        return __pmem_direct_access(pmem, pgoff, nr_pages, mode, kaddr, pfn);
 }
 
 static const struct dax_operations pmem_dax_ops = {
@@ -8,6 +8,8 @@
 #include <linux/pfn_t.h>
 #include <linux/fs.h>
 
+enum dax_access_mode;
+
 /* this definition is in it's own header for tools/testing/nvdimm to consume */
 struct pmem_device {
         /* One contiguous memory region per device */
@@ -28,7 +30,8 @@ struct pmem_device {
 };
 
 long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
-                long nr_pages, void **kaddr, pfn_t *pfn);
+                long nr_pages, enum dax_access_mode mode, void **kaddr,
+                pfn_t *pfn);
 
 #ifdef CONFIG_MEMORY_FAILURE
 static inline bool test_and_clear_pmem_poison(struct page *page)
@@ -32,7 +32,8 @@ static int dcssblk_open(struct block_device *bdev, fmode_t mode);
 static void dcssblk_release(struct gendisk *disk, fmode_t mode);
 static void dcssblk_submit_bio(struct bio *bio);
 static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
-                long nr_pages, void **kaddr, pfn_t *pfn);
+                long nr_pages, enum dax_access_mode mode, void **kaddr,
+                pfn_t *pfn);
 
 static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
 
@@ -50,7 +51,8 @@ static int dcssblk_dax_zero_page_range(struct dax_device *dax_dev,
         long rc;
         void *kaddr;
 
-        rc = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL);
+        rc = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS,
+                        &kaddr, NULL);
         if (rc < 0)
                 return rc;
         memset(kaddr, 0, nr_pages << PAGE_SHIFT);
@@ -927,7 +929,8 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
 
 static long
 dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
-                long nr_pages, void **kaddr, pfn_t *pfn)
+                long nr_pages, enum dax_access_mode mode, void **kaddr,
+                pfn_t *pfn)
 {
         struct dcssblk_dev_info *dev_info = dax_get_private(dax_dev);
 
fs/dax.c
@@ -721,7 +721,8 @@ static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter
         int id;
 
         id = dax_read_lock();
-        rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, &kaddr, NULL);
+        rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, DAX_ACCESS,
+                        &kaddr, NULL);
         if (rc < 0) {
                 dax_read_unlock(id);
                 return rc;
@@ -1013,7 +1014,7 @@ static int dax_iomap_pfn(const struct iomap *iomap, loff_t pos, size_t size,
 
         id = dax_read_lock();
         length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
-                        NULL, pfnp);
+                        DAX_ACCESS, NULL, pfnp);
         if (length < 0) {
                 rc = length;
                 goto out;
@@ -1122,7 +1123,7 @@ static int dax_memzero(struct dax_device *dax_dev, pgoff_t pgoff,
         void *kaddr;
         long ret;
 
-        ret = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
+        ret = dax_direct_access(dax_dev, pgoff, 1, DAX_ACCESS, &kaddr, NULL);
         if (ret > 0) {
                 memset(kaddr + offset, 0, size);
                 dax_flush(dax_dev, kaddr + offset, size);
@@ -1247,7 +1248,7 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
                 }
 
                 map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
-                                &kaddr, NULL);
+                                DAX_ACCESS, &kaddr, NULL);
                 if (map_len < 0) {
                         ret = map_len;
                         break;
@@ -1241,8 +1241,8 @@ static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
         INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker);
 
         id = dax_read_lock();
-        nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size), NULL,
-                                     NULL);
+        nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size),
+                                     DAX_ACCESS, NULL, NULL);
         dax_read_unlock(id);
         if (nr_pages < 0) {
                 pr_debug("dax_direct_access() returned %ld\n", nr_pages);
@@ -752,7 +752,8 @@ static void virtio_fs_cleanup_vqs(struct virtio_device *vdev,
  * offset.
  */
 static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
-                long nr_pages, void **kaddr, pfn_t *pfn)
+                long nr_pages, enum dax_access_mode mode,
+                void **kaddr, pfn_t *pfn)
 {
         struct virtio_fs *fs = dax_get_private(dax_dev);
         phys_addr_t offset = PFN_PHYS(pgoff);
@@ -772,7 +773,8 @@ static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
         long rc;
         void *kaddr;
 
-        rc = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL);
+        rc = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS, &kaddr,
+                        NULL);
         if (rc < 0)
                 return rc;
         memset(kaddr, 0, nr_pages << PAGE_SHIFT);
@@ -14,6 +14,11 @@ struct iomap_ops;
 struct iomap_iter;
 struct iomap;
 
+enum dax_access_mode {
+        DAX_ACCESS,
+        DAX_RECOVERY_WRITE,
+};
+
 struct dax_operations {
         /*
          * direct_access: translate a device-relative
@@ -21,7 +26,7 @@ struct dax_operations {
          * number of pages available for DAX at that pfn.
          */
         long (*direct_access)(struct dax_device *, pgoff_t, long,
-                        void **, pfn_t *);
+                        enum dax_access_mode, void **, pfn_t *);
         /*
          * Validate whether this device is usable as an fsdax backing
          * device.
@@ -178,7 +183,7 @@ static inline void dax_read_unlock(int id)
 bool dax_alive(struct dax_device *dax_dev);
 void *dax_get_private(struct dax_device *dax_dev);
 long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
-                void **kaddr, pfn_t *pfn);
+                enum dax_access_mode mode, void **kaddr, pfn_t *pfn);
 size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
                 size_t bytes, struct iov_iter *i);
 size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
@@ -20,6 +20,7 @@ struct dm_table;
 struct dm_report_zones_args;
 struct mapped_device;
 struct bio_vec;
+enum dax_access_mode;
 
 /*
  * Type of table, mapped_device's mempool and request_queue
@@ -146,7 +147,8 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
  * >= 0 : the number of bytes accessible at the address
  */
 typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
-                long nr_pages, void **kaddr, pfn_t *pfn);
+                long nr_pages, enum dax_access_mode node, void **kaddr,
+                pfn_t *pfn);
 typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
                 size_t nr_pages);
 
@@ -4,11 +4,13 @@
  */
 #include "test/nfit_test.h"
 #include <linux/blkdev.h>
+#include <linux/dax.h>
 #include <pmem.h>
 #include <nd.h>
 
 long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
-                long nr_pages, void **kaddr, pfn_t *pfn)
+                long nr_pages, enum dax_access_mode mode, void **kaddr,
+                pfn_t *pfn)
 {
         resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
 