/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#ifndef __LINUX_ND_H__
#define __LINUX_ND_H__
#include <linux/fs.h>
#include <linux/ndctl.h>
#include <linux/device.h>
#include <linux/badblocks.h>
#include <linux/perf_event.h>

enum nvdimm_event {
	NVDIMM_REVALIDATE_POISON,
	NVDIMM_REVALIDATE_REGION,
};

enum nvdimm_claim_class {
	NVDIMM_CCLASS_NONE,
	NVDIMM_CCLASS_BTT,
	NVDIMM_CCLASS_BTT2,
	NVDIMM_CCLASS_PFN,
	NVDIMM_CCLASS_DAX,
	NVDIMM_CCLASS_UNKNOWN,
};

#define NVDIMM_EVENT_VAR(_id)	event_attr_##_id
#define NVDIMM_EVENT_PTR(_id)	(&event_attr_##_id.attr.attr)

#define NVDIMM_EVENT_ATTR(_name, _id)				\
	PMU_EVENT_ATTR(_name, NVDIMM_EVENT_VAR(_id), _id,	\
		       nvdimm_events_sysfs_show)

/* Event attribute array index */
#define NVDIMM_PMU_FORMAT_ATTR		0
#define NVDIMM_PMU_EVENT_ATTR		1
#define NVDIMM_PMU_CPUMASK_ATTR		2
#define NVDIMM_PMU_NULL_ATTR		3
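
/*
 * Illustrative sketch (not part of this header): a PMU driver would
 * typically name its event ids with an enum so the token can be pasted
 * into the attribute variable name, declare each event with
 * NVDIMM_EVENT_ATTR(), and collect the attributes via NVDIMM_EVENT_PTR():
 *
 *	enum { CTL_RES_CNT = 0x1 };
 *
 *	NVDIMM_EVENT_ATTR(ctl_res_cnt, CTL_RES_CNT);
 *
 *	static struct attribute *nvdimm_events_attr[] = {
 *		NVDIMM_EVENT_PTR(CTL_RES_CNT),
 *		NULL,
 *	};
 *
 * The event name and id above are illustrative only.
 */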

/**
 * struct nvdimm_pmu - data structure for nvdimm perf driver
 * @pmu: pmu data structure for nvdimm performance stats.
 * @dev: nvdimm device pointer.
 * @cpu: designated cpu for counter access.
 * @node: node for cpu hotplug notifier link.
 * @cpuhp_state: state for cpu hotplug notification.
 * @arch_cpumask: cpumask to get designated cpu for counter access.
 */
struct nvdimm_pmu {
	struct pmu pmu;
	struct device *dev;
	int cpu;
	struct hlist_node node;
	enum cpuhp_state cpuhp_state;
	/* cpumask provided by arch/platform specific code */
	struct cpumask arch_cpumask;
};

struct platform_device;

#ifdef CONFIG_PERF_EVENTS
extern ssize_t nvdimm_events_sysfs_show(struct device *dev,
					struct device_attribute *attr,
					char *page);

int register_nvdimm_pmu(struct nvdimm_pmu *nvdimm, struct platform_device *pdev);
void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu);

#else
static inline int register_nvdimm_pmu(struct nvdimm_pmu *nvdimm, struct platform_device *pdev)
{
	return -ENXIO;
}

static inline void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu) { }
#endif
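
/*
 * Illustrative sketch (not part of this header): a platform driver fills
 * in @pmu (name, attr_groups, event_init/add/del/read callbacks) and
 * hands the structure to register_nvdimm_pmu() from its probe path:
 *
 *	nd_pmu = devm_kzalloc(&pdev->dev, sizeof(*nd_pmu), GFP_KERNEL);
 *	if (!nd_pmu)
 *		return -ENOMEM;
 *	... initialize nd_pmu->pmu ...
 *	rc = register_nvdimm_pmu(nd_pmu, pdev);
 *	if (rc)
 *		return rc;
 *
 * unregister_nvdimm_pmu() undoes the registration on driver removal.
 * The variable names above are hypothetical.
 */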

struct nd_device_driver {
	struct device_driver drv;
	unsigned long type;
	int (*probe)(struct device *dev);
	void (*remove)(struct device *dev);
	void (*shutdown)(struct device *dev);
	void (*notify)(struct device *dev, enum nvdimm_event event);
};

static inline struct nd_device_driver *to_nd_device_driver(
		struct device_driver *drv)
{
	return container_of(drv, struct nd_device_driver, drv);
}

/**
 * struct nd_namespace_common - core infrastructure of a namespace
 * @force_raw: ignore other personalities for the namespace (e.g. btt)
 * @dev: device model node
 * @claim: when set, another personality has taken ownership of the namespace
 * @claim_class: restrict claim type to a given class
 * @rw_bytes: access the raw namespace capacity with byte-aligned transfers
 */
struct nd_namespace_common {
	int force_raw;
	struct device dev;
	struct device *claim;
	enum nvdimm_claim_class claim_class;
	int (*rw_bytes)(struct nd_namespace_common *, resource_size_t offset,
			void *buf, size_t size, int rw, unsigned long flags);
};

static inline struct nd_namespace_common *to_ndns(struct device *dev)
{
	return container_of(dev, struct nd_namespace_common, dev);
}

/**
 * struct nd_namespace_io - device representation of a persistent memory range
 * @common: core namespace infrastructure; its embedded device is created by
 *	the nd region driver
 * @res: struct resource conversion of a NFIT SPA table
 * @size: cached resource_size(@res) for fast path size checks
 * @addr: virtual address to access the namespace range
 * @bb: badblocks list for the namespace range
 */
struct nd_namespace_io {
	struct nd_namespace_common common;
	struct resource res;
	resource_size_t size;
	void *addr;
	struct badblocks bb;
};

/**
 * struct nd_namespace_pmem - namespace device for dimm-backed interleaved memory
 * @nsio: device and system physical address range to drive
 * @lbasize: logical sector size for the namespace in block-device-mode
 * @alt_name: namespace name supplied in the dimm label
 * @uuid: namespace uuid supplied in the dimm label
 * @id: ida allocated id
 */
struct nd_namespace_pmem {
	struct nd_namespace_io nsio;
	unsigned long lbasize;
	char *alt_name;
	uuid_t *uuid;
	int id;
};

static inline struct nd_namespace_io *to_nd_namespace_io(const struct device *dev)
{
	return container_of(dev, struct nd_namespace_io, common.dev);
}

static inline struct nd_namespace_pmem *to_nd_namespace_pmem(const struct device *dev)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

	return container_of(nsio, struct nd_namespace_pmem, nsio);
}
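
/*
 * Illustrative sketch (not part of this header): a namespace-consuming
 * driver typically converts the probed device back to its namespace
 * representation before claiming resources:
 *
 *	static int example_probe(struct device *dev)
 *	{
 *		struct nd_namespace_common *ndns = to_ndns(dev);
 *		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
 *
 *		dev_dbg(dev, "namespace %s spans %pR\n",
 *			dev_name(&ndns->dev), &nsio->res);
 *		return 0;
 *	}
 *
 * "example_probe" is a hypothetical driver callback.
 */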

/**
 * nvdimm_read_bytes() - synchronously read bytes from an nvdimm namespace
 * @ndns: device to read
 * @offset: namespace-relative starting offset
 * @buf: buffer to fill
 * @size: transfer length
 * @flags: transfer flags, passed through to the namespace rw_bytes() operation
 *
 * @buf is up-to-date upon return from this routine.
 */
static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size,
		unsigned long flags)
{
	return ndns->rw_bytes(ndns, offset, buf, size, READ, flags);
}

/**
 * nvdimm_write_bytes() - synchronously write bytes to an nvdimm namespace
 * @ndns: device to write
 * @offset: namespace-relative starting offset
 * @buf: buffer to drain
 * @size: transfer length
 * @flags: transfer flags, passed through to the namespace rw_bytes() operation
 *
 * NVDIMM namespaces do not implement sectors internally. Depending on
 * the @ndns, the contents of @buf may be in cpu cache, platform buffers,
 * or on backing memory media upon return from this routine. Flushing
 * to media is handled internal to the @ndns driver, if at all.
 */
static inline int nvdimm_write_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size,
		unsigned long flags)
{
	return ndns->rw_bytes(ndns, offset, buf, size, WRITE, flags);
}
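
/*
 * Illustrative sketch (not part of this header): callers use these
 * helpers for small metadata transfers against the raw namespace
 * capacity, e.g. reading and rewriting a superblock at a hypothetical
 * offset:
 *
 *	struct example_sb sb;
 *	int rc;
 *
 *	rc = nvdimm_read_bytes(ndns, SZ_4K, &sb, sizeof(sb), 0);
 *	if (rc)
 *		return rc;
 *	...
 *	rc = nvdimm_write_bytes(ndns, SZ_4K, &sb, sizeof(sb), 0);
 *
 * "struct example_sb" and the SZ_4K offset are hypothetical; the BTT and
 * PFN info blocks are accessed this way in practice.
 */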

#define MODULE_ALIAS_ND_DEVICE(type) \
	MODULE_ALIAS("nd:t" __stringify(type) "*")
#define ND_DEVICE_MODALIAS_FMT "nd:t%d"

struct nd_region;
void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event);
int __must_check __nd_driver_register(struct nd_device_driver *nd_drv,
		struct module *module, const char *mod_name);
static inline void nd_driver_unregister(struct nd_device_driver *drv)
{
	driver_unregister(&drv->drv);
}
#define nd_driver_register(driver) \
	__nd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
#define module_nd_driver(driver) \
	module_driver(driver, nd_driver_register, nd_driver_unregister)
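
/*
 * Illustrative sketch (not part of this header): a minimal nd bus driver
 * wires its callbacks into struct nd_device_driver and registers with
 * module_nd_driver():
 *
 *	static struct nd_device_driver example_driver = {
 *		.probe = example_probe,
 *		.remove = example_remove,
 *		.drv = {
 *			.name = "example",
 *		},
 *		.type = ND_DRIVER_NAMESPACE_PMEM,
 *	};
 *	module_nd_driver(example_driver);
 *
 * The "example" names are hypothetical; ND_DRIVER_NAMESPACE_PMEM comes
 * from <linux/ndctl.h>.
 */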
#endif /* __LINUX_ND_H__ */