Merge branch 'for-4.17/libnvdimm' into libnvdimm-for-next
commit 1ed41b5696

@@ -0,0 +1,65 @@
Device-tree bindings for persistent memory regions
-----------------------------------------------------

Persistent memory refers to a class of memory devices that are:

	a) Usable as main system memory (i.e. cacheable), and
	b) Retain their contents across power failure.

Given b) it is best to think of persistent memory as a kind of memory mapped
storage device. To ensure data integrity the operating system needs to manage
persistent regions separately to the normal memory pool. To aid with that this
binding provides a standardised interface for discovering where persistent
memory regions exist inside the physical address space.

Bindings for the region nodes:
-----------------------------

Required properties:
	- compatible = "pmem-region"

	- reg = <base, size>;
		The reg property should specify an address range that is
		translatable to a system physical address range. This address
		range should be mappable as normal system memory would be
		(i.e. cacheable).

		If the reg property contains multiple address ranges
		each address range will be treated as though it was specified
		in a separate device node. Having multiple address ranges in a
		node implies no special relationship between the two ranges.

Optional properties:
	- Any relevant NUMA associativity properties for the target platform.

	- volatile; This property indicates that this region is actually
	  backed by non-persistent memory. This lets the OS know that it
	  may skip the cache flushes required to ensure data is made
	  persistent after a write.

	  If this property is absent then the OS must assume that the region
	  is backed by non-volatile memory.

Examples:
--------------------

	/*
	 * This node specifies one 4KB region spanning from
	 * 0x5000 to 0x5fff that is backed by non-volatile memory.
	 */
	pmem@5000 {
		compatible = "pmem-region";
		reg = <0x00005000 0x00001000>;
	};

	/*
	 * This node specifies two 4KB regions that are backed by
	 * volatile (normal) memory.
	 */
	pmem@6000 {
		compatible = "pmem-region";
		reg = < 0x00006000 0x00001000
			0x00008000 0x00001000 >;
		volatile;
	};
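	As an illustration only (not part of the binding above): on a platform
	that uses the generic numa-node-id property, the optional NUMA
	association might be described as below. The property name and the
	address values here are assumptions; the relevant associativity
	property is platform dependent.

	/*
	 * Example only: a 4KB non-volatile region associated with NUMA
	 * node 1 via the generic numa-node-id property.
	 */
	pmem@10000 {
		compatible = "pmem-region";
		reg = <0x00010000 0x00001000>;
		numa-node-id = <1>;
	};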

@@ -8035,6 +8035,14 @@ Q:	https://patchwork.kernel.org/project/linux-nvdimm/list/
S:	Supported
F:	drivers/nvdimm/pmem*

LIBNVDIMM: DEVICETREE BINDINGS
M:	Oliver O'Halloran <oohall@gmail.com>
L:	linux-nvdimm@lists.01.org
Q:	https://patchwork.kernel.org/project/linux-nvdimm/list/
S:	Supported
F:	drivers/nvdimm/of_pmem.c
F:	Documentation/devicetree/bindings/pmem/pmem-region.txt

LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM
M:	Dan Williams <dan.j.williams@intel.com>
L:	linux-nvdimm@lists.01.org

@@ -821,6 +821,9 @@ static int __init opal_init(void)
	/* Create i2c platform devices */
	opal_pdev_init("ibm,opal-i2c");

	/* Handle non-volatile memory devices */
	opal_pdev_init("pmem-region");

	/* Setup a heartbeat thread if requested by OPAL */
	opal_init_heartbeat();

File diff suppressed because it is too large
@@ -51,9 +51,8 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
		if ((spa->address + spa->length - 1) < mce->addr)
			continue;
		found_match = 1;
		dev_dbg(dev, "%s: addr in SPA %d (0x%llx, 0x%llx)\n",
			__func__, spa->range_index, spa->address,
			spa->length);
		dev_dbg(dev, "addr in SPA %d (0x%llx, 0x%llx)\n",
			spa->range_index, spa->address, spa->length);
		/*
		 * We can break at the first match because we're going
		 * to rescan all the SPA ranges. There shouldn't be any

@@ -117,10 +117,17 @@ enum nfit_dimm_notifiers {
	NFIT_NOTIFY_DIMM_HEALTH = 0x81,
};

enum nfit_ars_state {
	ARS_REQ,
	ARS_DONE,
	ARS_SHORT,
	ARS_FAILED,
};

struct nfit_spa {
	struct list_head list;
	struct nd_region *nd_region;
	unsigned int ars_required:1;
	unsigned long ars_state;
	u32 clear_err_unit;
	u32 max_ars;
	struct acpi_nfit_system_address spa[0];

@@ -171,9 +178,8 @@ struct nfit_mem {
	struct resource *flush_wpq;
	unsigned long dsm_mask;
	int family;
	u32 has_lsi:1;
	u32 has_lsr:1;
	u32 has_lsw:1;
	bool has_lsr;
	bool has_lsw;
};

struct acpi_nfit_desc {

@@ -191,18 +197,18 @@ struct acpi_nfit_desc {
	struct device *dev;
	u8 ars_start_flags;
	struct nd_cmd_ars_status *ars_status;
	size_t ars_status_size;
	struct work_struct work;
	struct delayed_work dwork;
	struct list_head list;
	struct kernfs_node *scrub_count_state;
	unsigned int max_ars;
	unsigned int scrub_count;
	unsigned int scrub_mode;
	unsigned int cancel:1;
	unsigned int init_complete:1;
	unsigned long dimm_cmd_force_en;
	unsigned long bus_cmd_force_en;
	unsigned long bus_nfit_cmd_force_en;
	unsigned int platform_cap;
	unsigned int scrub_tmo;
	int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
			void *iobuf, u64 len, int rw);
};

@@ -244,7 +250,7 @@ struct nfit_blk {

extern struct list_head acpi_descs;
extern struct mutex acpi_desc_lock;
int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, u8 flags);
int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags);

#ifdef CONFIG_X86_MCE
void nfit_mce_register(void);

@ -257,8 +257,8 @@ static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
|
|||
|
||||
dax_region = dev_dax->region;
|
||||
if (dax_region->align > PAGE_SIZE) {
|
||||
dev_dbg(dev, "%s: alignment (%#x) > fault size (%#x)\n",
|
||||
__func__, dax_region->align, fault_size);
|
||||
dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
|
||||
dax_region->align, fault_size);
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
|
||||
|
@ -267,8 +267,7 @@ static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
|
|||
|
||||
phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
|
||||
if (phys == -1) {
|
||||
dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
|
||||
vmf->pgoff);
|
||||
dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff);
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
|
||||
|
@ -299,14 +298,14 @@ static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
|
|||
|
||||
dax_region = dev_dax->region;
|
||||
if (dax_region->align > PMD_SIZE) {
|
||||
dev_dbg(dev, "%s: alignment (%#x) > fault size (%#x)\n",
|
||||
__func__, dax_region->align, fault_size);
|
||||
dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
|
||||
dax_region->align, fault_size);
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
|
||||
/* dax pmd mappings require pfn_t_devmap() */
|
||||
if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
|
||||
dev_dbg(dev, "%s: region lacks devmap flags\n", __func__);
|
||||
dev_dbg(dev, "region lacks devmap flags\n");
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
|
||||
|
@ -323,8 +322,7 @@ static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
|
|||
pgoff = linear_page_index(vmf->vma, pmd_addr);
|
||||
phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE);
|
||||
if (phys == -1) {
|
||||
dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
|
||||
pgoff);
|
||||
dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
|
||||
|
@ -351,14 +349,14 @@ static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
|
|||
|
||||
dax_region = dev_dax->region;
|
||||
if (dax_region->align > PUD_SIZE) {
|
||||
dev_dbg(dev, "%s: alignment (%#x) > fault size (%#x)\n",
|
||||
__func__, dax_region->align, fault_size);
|
||||
dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
|
||||
dax_region->align, fault_size);
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
|
||||
/* dax pud mappings require pfn_t_devmap() */
|
||||
if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
|
||||
dev_dbg(dev, "%s: region lacks devmap flags\n", __func__);
|
||||
dev_dbg(dev, "region lacks devmap flags\n");
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
|
||||
|
@ -375,8 +373,7 @@ static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
|
|||
pgoff = linear_page_index(vmf->vma, pud_addr);
|
||||
phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);
|
||||
if (phys == -1) {
|
||||
dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
|
||||
pgoff);
|
||||
dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
|
||||
|
@ -399,9 +396,8 @@ static int dev_dax_huge_fault(struct vm_fault *vmf,
|
|||
struct file *filp = vmf->vma->vm_file;
|
||||
struct dev_dax *dev_dax = filp->private_data;
|
||||
|
||||
dev_dbg(&dev_dax->dev, "%s: %s: %s (%#lx - %#lx) size = %d\n", __func__,
|
||||
current->comm, (vmf->flags & FAULT_FLAG_WRITE)
|
||||
? "write" : "read",
|
||||
dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
|
||||
(vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
|
||||
vmf->vma->vm_start, vmf->vma->vm_end, pe_size);
|
||||
|
||||
id = dax_read_lock();
|
||||
|
@ -450,7 +446,7 @@ static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
|
|||
struct dev_dax *dev_dax = filp->private_data;
|
||||
int rc, id;
|
||||
|
||||
dev_dbg(&dev_dax->dev, "%s\n", __func__);
|
||||
dev_dbg(&dev_dax->dev, "trace\n");
|
||||
|
||||
/*
|
||||
* We lock to check dax_dev liveness and will re-check at
|
||||
|
@ -508,7 +504,7 @@ static int dax_open(struct inode *inode, struct file *filp)
|
|||
struct inode *__dax_inode = dax_inode(dax_dev);
|
||||
struct dev_dax *dev_dax = dax_get_private(dax_dev);
|
||||
|
||||
dev_dbg(&dev_dax->dev, "%s\n", __func__);
|
||||
dev_dbg(&dev_dax->dev, "trace\n");
|
||||
inode->i_mapping = __dax_inode->i_mapping;
|
||||
inode->i_mapping->host = __dax_inode;
|
||||
filp->f_mapping = inode->i_mapping;
|
||||
|
@ -523,7 +519,7 @@ static int dax_release(struct inode *inode, struct file *filp)
|
|||
{
|
||||
struct dev_dax *dev_dax = filp->private_data;
|
||||
|
||||
dev_dbg(&dev_dax->dev, "%s\n", __func__);
|
||||
dev_dbg(&dev_dax->dev, "trace\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -565,7 +561,7 @@ static void unregister_dev_dax(void *dev)
|
|||
struct inode *inode = dax_inode(dax_dev);
|
||||
struct cdev *cdev = inode->i_cdev;
|
||||
|
||||
dev_dbg(dev, "%s\n", __func__);
|
||||
dev_dbg(dev, "trace\n");
|
||||
|
||||
kill_dev_dax(dev_dax);
|
||||
cdev_device_del(cdev, dev);
|
||||
|
|
|
@ -34,7 +34,7 @@ static void dax_pmem_percpu_release(struct percpu_ref *ref)
|
|||
{
|
||||
struct dax_pmem *dax_pmem = to_dax_pmem(ref);
|
||||
|
||||
dev_dbg(dax_pmem->dev, "%s\n", __func__);
|
||||
dev_dbg(dax_pmem->dev, "trace\n");
|
||||
complete(&dax_pmem->cmp);
|
||||
}
|
||||
|
||||
|
@ -43,7 +43,7 @@ static void dax_pmem_percpu_exit(void *data)
|
|||
struct percpu_ref *ref = data;
|
||||
struct dax_pmem *dax_pmem = to_dax_pmem(ref);
|
||||
|
||||
dev_dbg(dax_pmem->dev, "%s\n", __func__);
|
||||
dev_dbg(dax_pmem->dev, "trace\n");
|
||||
wait_for_completion(&dax_pmem->cmp);
|
||||
percpu_ref_exit(ref);
|
||||
}
|
||||
|
@ -53,7 +53,7 @@ static void dax_pmem_percpu_kill(void *data)
|
|||
struct percpu_ref *ref = data;
|
||||
struct dax_pmem *dax_pmem = to_dax_pmem(ref);
|
||||
|
||||
dev_dbg(dax_pmem->dev, "%s\n", __func__);
|
||||
dev_dbg(dax_pmem->dev, "trace\n");
|
||||
percpu_ref_kill(ref);
|
||||
}
|
||||
|
||||
|
@ -150,17 +150,7 @@ static struct nd_device_driver dax_pmem_driver = {
|
|||
.type = ND_DRIVER_DAX_PMEM,
|
||||
};
|
||||
|
||||
static int __init dax_pmem_init(void)
|
||||
{
|
||||
return nd_driver_register(&dax_pmem_driver);
|
||||
}
|
||||
module_init(dax_pmem_init);
|
||||
|
||||
static void __exit dax_pmem_exit(void)
|
||||
{
|
||||
driver_unregister(&dax_pmem_driver.drv);
|
||||
}
|
||||
module_exit(dax_pmem_exit);
|
||||
module_nd_driver(dax_pmem_driver);
|
||||
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_AUTHOR("Intel Corporation");
|
||||
|
|
|
@@ -102,4 +102,15 @@ config NVDIMM_DAX

	  Select Y if unsure

config OF_PMEM
	# FIXME: make tristate once OF_NUMA dependency removed
	bool "Device-tree support for persistent memory regions"
	depends on OF
	default LIBNVDIMM
	help
	  Allows regions of persistent memory to be described in the
	  device-tree.

	  Select Y if unsure.

endif

@@ -4,6 +4,7 @@ obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o
obj-$(CONFIG_ND_BTT) += nd_btt.o
obj-$(CONFIG_ND_BLK) += nd_blk.o
obj-$(CONFIG_X86_PMEM_LEGACY) += nd_e820.o
obj-$(CONFIG_OF_PMEM) += of_pmem.o

nd_pmem-y := pmem.o

@ -26,7 +26,7 @@ static void nd_btt_release(struct device *dev)
|
|||
struct nd_region *nd_region = to_nd_region(dev->parent);
|
||||
struct nd_btt *nd_btt = to_nd_btt(dev);
|
||||
|
||||
dev_dbg(dev, "%s\n", __func__);
|
||||
dev_dbg(dev, "trace\n");
|
||||
nd_detach_ndns(&nd_btt->dev, &nd_btt->ndns);
|
||||
ida_simple_remove(&nd_region->btt_ida, nd_btt->id);
|
||||
kfree(nd_btt->uuid);
|
||||
|
@ -74,8 +74,8 @@ static ssize_t sector_size_store(struct device *dev,
|
|||
nvdimm_bus_lock(dev);
|
||||
rc = nd_size_select_store(dev, buf, &nd_btt->lbasize,
|
||||
btt_lbasize_supported);
|
||||
dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
|
||||
rc, buf, buf[len - 1] == '\n' ? "" : "\n");
|
||||
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
|
||||
buf[len - 1] == '\n' ? "" : "\n");
|
||||
nvdimm_bus_unlock(dev);
|
||||
device_unlock(dev);
|
||||
|
||||
|
@ -101,8 +101,8 @@ static ssize_t uuid_store(struct device *dev,
|
|||
|
||||
device_lock(dev);
|
||||
rc = nd_uuid_store(dev, &nd_btt->uuid, buf, len);
|
||||
dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
|
||||
rc, buf, buf[len - 1] == '\n' ? "" : "\n");
|
||||
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
|
||||
buf[len - 1] == '\n' ? "" : "\n");
|
||||
device_unlock(dev);
|
||||
|
||||
return rc ? rc : len;
|
||||
|
@ -131,8 +131,8 @@ static ssize_t namespace_store(struct device *dev,
|
|||
device_lock(dev);
|
||||
nvdimm_bus_lock(dev);
|
||||
rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len);
|
||||
dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
|
||||
rc, buf, buf[len - 1] == '\n' ? "" : "\n");
|
||||
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
|
||||
buf[len - 1] == '\n' ? "" : "\n");
|
||||
nvdimm_bus_unlock(dev);
|
||||
device_unlock(dev);
|
||||
|
||||
|
@ -206,8 +206,8 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
|
|||
dev->groups = nd_btt_attribute_groups;
|
||||
device_initialize(&nd_btt->dev);
|
||||
if (ndns && !__nd_attach_ndns(&nd_btt->dev, ndns, &nd_btt->ndns)) {
|
||||
dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n",
|
||||
__func__, dev_name(ndns->claim));
|
||||
dev_dbg(&ndns->dev, "failed, already claimed by %s\n",
|
||||
dev_name(ndns->claim));
|
||||
put_device(dev);
|
||||
return NULL;
|
||||
}
|
||||
|
@ -346,8 +346,7 @@ int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns)
|
|||
return -ENOMEM;
|
||||
btt_sb = devm_kzalloc(dev, sizeof(*btt_sb), GFP_KERNEL);
|
||||
rc = __nd_btt_probe(to_nd_btt(btt_dev), ndns, btt_sb);
|
||||
dev_dbg(dev, "%s: btt: %s\n", __func__,
|
||||
rc == 0 ? dev_name(btt_dev) : "<none>");
|
||||
dev_dbg(dev, "btt: %s\n", rc == 0 ? dev_name(btt_dev) : "<none>");
|
||||
if (rc < 0) {
|
||||
struct nd_btt *nd_btt = to_nd_btt(btt_dev);
|
||||
|
||||
|
|
|
@ -358,6 +358,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
|
|||
nvdimm_bus->dev.release = nvdimm_bus_release;
|
||||
nvdimm_bus->dev.groups = nd_desc->attr_groups;
|
||||
nvdimm_bus->dev.bus = &nvdimm_bus_type;
|
||||
nvdimm_bus->dev.of_node = nd_desc->of_node;
|
||||
dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id);
|
||||
rc = device_register(&nvdimm_bus->dev);
|
||||
if (rc) {
|
||||
|
@ -984,8 +985,8 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
|
|||
|
||||
if (cmd == ND_CMD_CALL) {
|
||||
func = pkg.nd_command;
|
||||
dev_dbg(dev, "%s:%s, idx: %llu, in: %u, out: %u, len %llu\n",
|
||||
__func__, dimm_name, pkg.nd_command,
|
||||
dev_dbg(dev, "%s, idx: %llu, in: %u, out: %u, len %llu\n",
|
||||
dimm_name, pkg.nd_command,
|
||||
in_len, out_len, buf_len);
|
||||
}
|
||||
|
||||
|
@ -996,8 +997,8 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
|
|||
u32 copy;
|
||||
|
||||
if (out_size == UINT_MAX) {
|
||||
dev_dbg(dev, "%s:%s unknown output size cmd: %s field: %d\n",
|
||||
__func__, dimm_name, cmd_name, i);
|
||||
dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n",
|
||||
dimm_name, cmd_name, i);
|
||||
return -EFAULT;
|
||||
}
|
||||
if (out_len < sizeof(out_env))
|
||||
|
@ -1012,9 +1013,8 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
|
|||
|
||||
buf_len = (u64) out_len + (u64) in_len;
|
||||
if (buf_len > ND_IOCTL_MAX_BUFLEN) {
|
||||
dev_dbg(dev, "%s:%s cmd: %s buf_len: %llu > %d\n", __func__,
|
||||
dimm_name, cmd_name, buf_len,
|
||||
ND_IOCTL_MAX_BUFLEN);
|
||||
dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name,
|
||||
cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
|
|
@ -148,7 +148,7 @@ ssize_t nd_namespace_store(struct device *dev,
|
|||
char *name;
|
||||
|
||||
if (dev->driver) {
|
||||
dev_dbg(dev, "%s: -EBUSY\n", __func__);
|
||||
dev_dbg(dev, "namespace already active\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
|
|
|
@ -134,7 +134,7 @@ static void nvdimm_map_release(struct kref *kref)
|
|||
nvdimm_map = container_of(kref, struct nvdimm_map, kref);
|
||||
nvdimm_bus = nvdimm_map->nvdimm_bus;
|
||||
|
||||
dev_dbg(&nvdimm_bus->dev, "%s: %pa\n", __func__, &nvdimm_map->offset);
|
||||
dev_dbg(&nvdimm_bus->dev, "%pa\n", &nvdimm_map->offset);
|
||||
list_del(&nvdimm_map->list);
|
||||
if (nvdimm_map->flags)
|
||||
memunmap(nvdimm_map->mem);
|
||||
|
@ -230,8 +230,8 @@ static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
|
|||
|
||||
for (i = 0; i < 16; i++) {
|
||||
if (!isxdigit(str[0]) || !isxdigit(str[1])) {
|
||||
dev_dbg(dev, "%s: pos: %d buf[%zd]: %c buf[%zd]: %c\n",
|
||||
__func__, i, str - buf, str[0],
|
||||
dev_dbg(dev, "pos: %d buf[%zd]: %c buf[%zd]: %c\n",
|
||||
i, str - buf, str[0],
|
||||
str + 1 - buf, str[1]);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
|
|
@ -24,7 +24,7 @@ static void nd_dax_release(struct device *dev)
|
|||
struct nd_dax *nd_dax = to_nd_dax(dev);
|
||||
struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;
|
||||
|
||||
dev_dbg(dev, "%s\n", __func__);
|
||||
dev_dbg(dev, "trace\n");
|
||||
nd_detach_ndns(dev, &nd_pfn->ndns);
|
||||
ida_simple_remove(&nd_region->dax_ida, nd_pfn->id);
|
||||
kfree(nd_pfn->uuid);
|
||||
|
@ -129,8 +129,7 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
|
|||
pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
|
||||
nd_pfn->pfn_sb = pfn_sb;
|
||||
rc = nd_pfn_validate(nd_pfn, DAX_SIG);
|
||||
dev_dbg(dev, "%s: dax: %s\n", __func__,
|
||||
rc == 0 ? dev_name(dax_dev) : "<none>");
|
||||
dev_dbg(dev, "dax: %s\n", rc == 0 ? dev_name(dax_dev) : "<none>");
|
||||
if (rc < 0) {
|
||||
nd_detach_ndns(dax_dev, &nd_pfn->ndns);
|
||||
put_device(dax_dev);
|
||||
|
|
|
@@ -67,9 +67,11 @@ static int nvdimm_probe(struct device *dev)
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);
	nd_label_copy(ndd, to_next_namespace_index(ndd),
			to_current_namespace_index(ndd));
	rc = nd_label_reserve_dpa(ndd);
	if (ndd->ns_current >= 0)
		nvdimm_set_aliasing(dev);
	if (ndd->ns_current >= 0) {
		rc = nd_label_reserve_dpa(ndd);
		if (rc == 0)
			nvdimm_set_aliasing(dev);
	}
	nvdimm_clear_locked(dev);
	nvdimm_bus_unlock(dev);

@ -131,7 +131,7 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
|
|||
}
|
||||
memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
|
||||
}
|
||||
dev_dbg(ndd->dev, "%s: len: %zu rc: %d\n", __func__, offset, rc);
|
||||
dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
|
||||
kfree(cmd);
|
||||
|
||||
return rc;
|
||||
|
@ -266,8 +266,7 @@ void nvdimm_drvdata_release(struct kref *kref)
|
|||
struct device *dev = ndd->dev;
|
||||
struct resource *res, *_r;
|
||||
|
||||
dev_dbg(dev, "%s\n", __func__);
|
||||
|
||||
dev_dbg(dev, "trace\n");
|
||||
nvdimm_bus_lock(dev);
|
||||
for_each_dpa_resource_safe(ndd, res, _r)
|
||||
nvdimm_free_dpa(ndd, res);
|
||||
|
@ -660,7 +659,7 @@ int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
|
|||
nd_synchronize();
|
||||
|
||||
device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
|
||||
dev_dbg(&nvdimm_bus->dev, "%s: count: %d\n", __func__, count);
|
||||
dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
|
||||
if (count != dimm_count)
|
||||
return -ENXIO;
|
||||
return 0;
|
||||
|
|
|
@@ -45,9 +45,27 @@ unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
	return ndd->nslabel_size;
}

static size_t __sizeof_namespace_index(u32 nslot)
{
	return ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
			NSINDEX_ALIGN);
}

static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
		size_t index_size)
{
	return (ndd->nsarea.config_size - index_size * 2) /
			sizeof_namespace_label(ndd);
}

int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
	return ndd->nsarea.config_size / (sizeof_namespace_label(ndd) + 1);
	u32 tmp_nslot, n;

	tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
	n = __sizeof_namespace_index(tmp_nslot) / NSINDEX_ALIGN;

	return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
}

size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)

@@ -55,18 +73,14 @@ size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
	u32 nslot, space, size;

	/*
	 * The minimum index space is 512 bytes, with that amount of
	 * index we can describe ~1400 labels which is less than a byte
	 * of overhead per label. Round up to a byte of overhead per
	 * label and determine the size of the index region. Yes, this
	 * starts to waste space at larger config_sizes, but it's
	 * unlikely we'll ever see anything but 128K.
	 * Per UEFI 2.7, the minimum size of the Label Storage Area is large
	 * enough to hold 2 index blocks and 2 labels. The minimum index
	 * block size is 256 bytes, and the minimum label size is 256 bytes.
	 */
	nslot = nvdimm_num_label_slots(ndd);
	space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
	size = ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
			NSINDEX_ALIGN) * 2;
	if (size <= space)
	size = __sizeof_namespace_index(nslot) * 2;
	if (size <= space && nslot >= 2)
		return size / 2;

	dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",

@ -121,8 +135,7 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
|
|||
|
||||
memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
|
||||
if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
|
||||
dev_dbg(dev, "%s: nsindex%d signature invalid\n",
|
||||
__func__, i);
|
||||
dev_dbg(dev, "nsindex%d signature invalid\n", i);
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -135,8 +148,8 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
|
|||
labelsize = 128;
|
||||
|
||||
if (labelsize != sizeof_namespace_label(ndd)) {
|
||||
dev_dbg(dev, "%s: nsindex%d labelsize %d invalid\n",
|
||||
__func__, i, nsindex[i]->labelsize);
|
||||
dev_dbg(dev, "nsindex%d labelsize %d invalid\n",
|
||||
i, nsindex[i]->labelsize);
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -145,30 +158,28 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
|
|||
sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
|
||||
nsindex[i]->checksum = __cpu_to_le64(sum_save);
|
||||
if (sum != sum_save) {
|
||||
dev_dbg(dev, "%s: nsindex%d checksum invalid\n",
|
||||
__func__, i);
|
||||
dev_dbg(dev, "nsindex%d checksum invalid\n", i);
|
||||
continue;
|
||||
}
|
||||
|
||||
seq = __le32_to_cpu(nsindex[i]->seq);
|
||||
if ((seq & NSINDEX_SEQ_MASK) == 0) {
|
||||
dev_dbg(dev, "%s: nsindex%d sequence: %#x invalid\n",
|
||||
__func__, i, seq);
|
||||
dev_dbg(dev, "nsindex%d sequence: %#x invalid\n", i, seq);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* sanity check the index against expected values */
|
||||
if (__le64_to_cpu(nsindex[i]->myoff)
|
||||
!= i * sizeof_namespace_index(ndd)) {
|
||||
dev_dbg(dev, "%s: nsindex%d myoff: %#llx invalid\n",
|
||||
__func__, i, (unsigned long long)
|
||||
dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
|
||||
i, (unsigned long long)
|
||||
__le64_to_cpu(nsindex[i]->myoff));
|
||||
continue;
|
||||
}
|
||||
if (__le64_to_cpu(nsindex[i]->otheroff)
|
||||
!= (!i) * sizeof_namespace_index(ndd)) {
|
||||
dev_dbg(dev, "%s: nsindex%d otheroff: %#llx invalid\n",
|
||||
__func__, i, (unsigned long long)
|
||||
dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
|
||||
i, (unsigned long long)
|
||||
__le64_to_cpu(nsindex[i]->otheroff));
|
||||
continue;
|
||||
}
|
||||
|
@ -176,8 +187,7 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
|
|||
size = __le64_to_cpu(nsindex[i]->mysize);
|
||||
if (size > sizeof_namespace_index(ndd)
|
||||
|| size < sizeof(struct nd_namespace_index)) {
|
||||
dev_dbg(dev, "%s: nsindex%d mysize: %#llx invalid\n",
|
||||
__func__, i, size);
|
||||
dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -185,9 +195,8 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
|
|||
if (nslot * sizeof_namespace_label(ndd)
|
||||
+ 2 * sizeof_namespace_index(ndd)
|
||||
> ndd->nsarea.config_size) {
|
||||
dev_dbg(dev, "%s: nsindex%d nslot: %u invalid, config_size: %#x\n",
|
||||
__func__, i, nslot,
|
||||
ndd->nsarea.config_size);
|
||||
dev_dbg(dev, "nsindex%d nslot: %u invalid, config_size: %#x\n",
|
||||
i, nslot, ndd->nsarea.config_size);
|
||||
continue;
|
||||
}
|
||||
valid[i] = true;
|
||||
|
@ -356,8 +365,8 @@ static bool slot_valid(struct nvdimm_drvdata *ndd,
|
|||
sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
|
||||
nd_label->checksum = __cpu_to_le64(sum_save);
|
||||
if (sum != sum_save) {
|
||||
dev_dbg(ndd->dev, "%s fail checksum. slot: %d expect: %#llx\n",
|
||||
__func__, slot, sum);
|
||||
dev_dbg(ndd->dev, "fail checksum. slot: %d expect: %#llx\n",
|
||||
slot, sum);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
@ -422,8 +431,8 @@ int nd_label_active_count(struct nvdimm_drvdata *ndd)
|
|||
u64 dpa = __le64_to_cpu(nd_label->dpa);
|
||||
|
||||
dev_dbg(ndd->dev,
|
||||
"%s: slot%d invalid slot: %d dpa: %llx size: %llx\n",
|
||||
__func__, slot, label_slot, dpa, size);
|
||||
"slot%d invalid slot: %d dpa: %llx size: %llx\n",
|
||||
slot, label_slot, dpa, size);
|
||||
continue;
|
||||
}
|
||||
count++;
|
||||
|
@ -650,7 +659,7 @@ static int __pmem_label_update(struct nd_region *nd_region,
|
|||
slot = nd_label_alloc_slot(ndd);
|
||||
if (slot == UINT_MAX)
|
||||
return -ENXIO;
|
||||
dev_dbg(ndd->dev, "%s: allocated: %d\n", __func__, slot);
|
||||
dev_dbg(ndd->dev, "allocated: %d\n", slot);
|
||||
|
||||
nd_label = to_label(ndd, slot);
|
||||
memset(nd_label, 0, sizeof_namespace_label(ndd));
|
||||
|
@ -678,7 +687,7 @@ static int __pmem_label_update(struct nd_region *nd_region,
|
|||
sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
|
||||
nd_label->checksum = __cpu_to_le64(sum);
|
||||
}
|
||||
nd_dbg_dpa(nd_region, ndd, res, "%s\n", __func__);
|
||||
nd_dbg_dpa(nd_region, ndd, res, "\n");
|
||||
|
||||
/* update label */
|
||||
offset = nd_label_offset(ndd, nd_label);
|
||||
|
@ -700,7 +709,7 @@ static int __pmem_label_update(struct nd_region *nd_region,
|
|||
break;
|
||||
}
|
||||
if (victim) {
|
||||
dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
|
||||
dev_dbg(ndd->dev, "free: %d\n", slot);
|
||||
slot = to_slot(ndd, victim->label);
|
||||
nd_label_free_slot(ndd, slot);
|
||||
victim->label = NULL;
|
||||
|
@ -868,7 +877,7 @@ static int __blk_label_update(struct nd_region *nd_region,
|
|||
slot = nd_label_alloc_slot(ndd);
|
||||
if (slot == UINT_MAX)
|
||||
goto abort;
|
||||
dev_dbg(ndd->dev, "%s: allocated: %d\n", __func__, slot);
|
||||
dev_dbg(ndd->dev, "allocated: %d\n", slot);
|
||||
|
||||
nd_label = to_label(ndd, slot);
|
||||
memset(nd_label, 0, sizeof_namespace_label(ndd));
|
||||
|
@ -928,7 +937,7 @@ static int __blk_label_update(struct nd_region *nd_region,
|
|||
|
||||
/* free up now unused slots in the new index */
|
||||
for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) {
|
||||
dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
|
||||
dev_dbg(ndd->dev, "free: %d\n", slot);
|
||||
nd_label_free_slot(ndd, slot);
|
||||
}
|
||||
|
||||
|
@ -1092,7 +1101,7 @@ static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
|
|||
active--;
|
||||
slot = to_slot(ndd, nd_label);
|
||||
nd_label_free_slot(ndd, slot);
|
||||
dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
|
||||
dev_dbg(ndd->dev, "free: %d\n", slot);
|
||||
list_move_tail(&label_ent->list, &list);
|
||||
label_ent->label = NULL;
|
||||
}
|
||||
|
@ -1100,7 +1109,7 @@ static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
|
|||
|
||||
if (active == 0) {
|
||||
nd_mapping_free_labels(nd_mapping);
|
||||
dev_dbg(ndd->dev, "%s: no more active labels\n", __func__);
|
||||
dev_dbg(ndd->dev, "no more active labels\n");
|
||||
}
|
||||
mutex_unlock(&nd_mapping->lock);
|
||||
|
||||
|
|
|
@@ -33,7 +33,7 @@ enum {
	BTTINFO_UUID_LEN = 16,
	BTTINFO_FLAG_ERROR = 0x1, /* error state (read-only) */
	BTTINFO_MAJOR_VERSION = 1,
	ND_LABEL_MIN_SIZE = 512 * 129, /* see sizeof_namespace_index() */
	ND_LABEL_MIN_SIZE = 256 * 4, /* see sizeof_namespace_index() */
	ND_LABEL_ID_SIZE = 50,
	ND_NSINDEX_INIT = 0x1,
};

@ -421,7 +421,7 @@ static ssize_t alt_name_store(struct device *dev,
|
|||
rc = __alt_name_store(dev, buf, len);
|
||||
if (rc >= 0)
|
||||
rc = nd_namespace_label_update(nd_region, dev);
|
||||
dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc);
|
||||
dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
|
||||
nvdimm_bus_unlock(dev);
|
||||
device_unlock(dev);
|
||||
|
||||
|
@ -1007,7 +1007,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
|
|||
if (uuid_not_set(uuid, dev, __func__))
|
||||
return -ENXIO;
|
||||
if (nd_region->ndr_mappings == 0) {
|
||||
dev_dbg(dev, "%s: not associated with dimm(s)\n", __func__);
|
||||
dev_dbg(dev, "not associated with dimm(s)\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
|
@ -1105,8 +1105,7 @@ static ssize_t size_store(struct device *dev,
|
|||
*uuid = NULL;
|
||||
}
|
||||
|
||||
dev_dbg(dev, "%s: %llx %s (%d)\n", __func__, val, rc < 0
|
||||
? "fail" : "success", rc);
|
||||
dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);
|
||||
|
||||
nvdimm_bus_unlock(dev);
|
||||
device_unlock(dev);
|
||||
|
@ -1270,8 +1269,8 @@ static ssize_t uuid_store(struct device *dev,
|
|||
rc = nd_namespace_label_update(nd_region, dev);
|
||||
else
|
||||
kfree(uuid);
|
||||
dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
|
||||
rc, buf, buf[len - 1] == '\n' ? "" : "\n");
|
||||
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
|
||||
buf[len - 1] == '\n' ? "" : "\n");
|
||||
nvdimm_bus_unlock(dev);
|
||||
device_unlock(dev);
|
||||
|
||||
|
@ -1355,9 +1354,8 @@ static ssize_t sector_size_store(struct device *dev,
|
|||
rc = nd_size_select_store(dev, buf, lbasize, supported);
|
||||
if (rc >= 0)
|
||||
rc = nd_namespace_label_update(nd_region, dev);
|
||||
dev_dbg(dev, "%s: result: %zd %s: %s%s", __func__,
|
||||
rc, rc < 0 ? "tried" : "wrote", buf,
|
||||
buf[len - 1] == '\n' ? "" : "\n");
|
||||
dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
|
||||
buf, buf[len - 1] == '\n' ? "" : "\n");
|
||||
nvdimm_bus_unlock(dev);
|
||||
device_unlock(dev);
|
||||
|
||||
|
@ -1519,7 +1517,7 @@ static ssize_t holder_class_store(struct device *dev,
|
|||
rc = __holder_class_store(dev, buf);
|
||||
if (rc >= 0)
|
||||
rc = nd_namespace_label_update(nd_region, dev);
|
||||
dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc);
|
||||
dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
|
||||
nvdimm_bus_unlock(dev);
|
||||
device_unlock(dev);
|
||||
|
||||
|
@ -1717,8 +1715,7 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
|
|||
if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__))
|
||||
return ERR_PTR(-ENODEV);
|
||||
if (!nsblk->lbasize) {
|
||||
dev_dbg(&ndns->dev, "%s: sector size not set\n",
|
||||
__func__);
|
||||
dev_dbg(&ndns->dev, "sector size not set\n");
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
if (!nd_namespace_blk_validate(nsblk))
|
||||
|
@ -1798,9 +1795,7 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
|
|||
}
|
||||
|
||||
if (found_uuid) {
|
||||
dev_dbg(ndd->dev,
|
||||
"%s duplicate entry for uuid\n",
|
||||
__func__);
|
||||
dev_dbg(ndd->dev, "duplicate entry for uuid\n");
|
||||
return false;
|
||||
}
|
||||
found_uuid = true;
|
||||
|
@ -1926,7 +1921,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
|
|||
}
|
||||
|
||||
if (i < nd_region->ndr_mappings) {
|
||||
struct nvdimm_drvdata *ndd = to_ndd(&nd_region->mapping[i]);
|
||||
struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;
|
||||
|
||||
/*
|
||||
* Give up if we don't find an instance of a uuid at each
|
||||
|
@ -1934,7 +1929,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
|
|||
* find a dimm with two instances of the same uuid.
|
||||
*/
|
||||
dev_err(&nd_region->dev, "%s missing label for %pUb\n",
|
||||
dev_name(ndd->dev), nd_label->uuid);
|
||||
nvdimm_name(nvdimm), nd_label->uuid);
|
||||
rc = -EINVAL;
|
||||
goto err;
|
||||
}
|
||||
|
@ -1994,14 +1989,13 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
|
|||
namespace_pmem_release(dev);
|
||||
switch (rc) {
|
||||
case -EINVAL:
|
||||
dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__);
|
||||
dev_dbg(&nd_region->dev, "invalid label(s)\n");
|
||||
break;
|
||||
case -ENODEV:
|
||||
dev_dbg(&nd_region->dev, "%s: label not found\n", __func__);
|
||||
dev_dbg(&nd_region->dev, "label not found\n");
|
||||
break;
|
||||
default:
|
||||
dev_dbg(&nd_region->dev, "%s: unexpected err: %d\n",
|
||||
__func__, rc);
|
||||
dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc);
|
||||
break;
|
||||
}
|
||||
return ERR_PTR(rc);
|
||||
|
@ -2334,8 +2328,8 @@ static struct device **scan_labels(struct nd_region *nd_region)
|
|||
|
||||
}
|
||||
|
||||
dev_dbg(&nd_region->dev, "%s: discovered %d %s namespace%s\n",
|
||||
__func__, count, is_nd_blk(&nd_region->dev)
|
||||
dev_dbg(&nd_region->dev, "discovered %d %s namespace%s\n",
|
||||
count, is_nd_blk(&nd_region->dev)
|
||||
? "blk" : "pmem", count == 1 ? "" : "s");
|
||||
|
||||
if (count == 0) {
|
||||
|
@ -2467,7 +2461,7 @@ static int init_active_labels(struct nd_region *nd_region)
|
|||
get_ndd(ndd);
|
||||
|
||||
count = nd_label_active_count(ndd);
|
||||
dev_dbg(ndd->dev, "%s: %d\n", __func__, count);
|
||||
dev_dbg(ndd->dev, "count: %d\n", count);
|
||||
if (!count)
|
||||
continue;
|
||||
for (j = 0; j < count; j++) {
|
||||
|
|
|
@ -341,7 +341,6 @@ static inline struct device *nd_dax_create(struct nd_region *nd_region)
|
|||
}
|
||||
#endif
|
||||
|
||||
struct nd_region *to_nd_region(struct device *dev);
|
||||
int nd_region_to_nstype(struct nd_region *nd_region);
|
||||
int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
|
||||
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
|
||||
|
|
|
@@ -0,0 +1,119 @@
// SPDX-License-Identifier: GPL-2.0+

#define pr_fmt(fmt) "of_pmem: " fmt

#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/slab.h>

static const struct attribute_group *region_attr_groups[] = {
	&nd_region_attribute_group,
	&nd_device_attribute_group,
	NULL,
};

static const struct attribute_group *bus_attr_groups[] = {
	&nvdimm_bus_attribute_group,
	NULL,
};

struct of_pmem_private {
	struct nvdimm_bus_descriptor bus_desc;
	struct nvdimm_bus *bus;
};

static int of_pmem_region_probe(struct platform_device *pdev)
{
	struct of_pmem_private *priv;
	struct device_node *np;
	struct nvdimm_bus *bus;
	bool is_volatile;
	int i;

	np = dev_of_node(&pdev->dev);
	if (!np)
		return -ENXIO;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->bus_desc.attr_groups = bus_attr_groups;
	priv->bus_desc.provider_name = "of_pmem";
	priv->bus_desc.module = THIS_MODULE;
	priv->bus_desc.of_node = np;

	priv->bus = bus = nvdimm_bus_register(&pdev->dev, &priv->bus_desc);
	if (!bus) {
		kfree(priv);
		return -ENODEV;
	}
	platform_set_drvdata(pdev, priv);

	is_volatile = !!of_find_property(np, "volatile", NULL);
	dev_dbg(&pdev->dev, "Registering %s regions from %pOF\n",
			is_volatile ? "volatile" : "non-volatile", np);

	for (i = 0; i < pdev->num_resources; i++) {
		struct nd_region_desc ndr_desc;
		struct nd_region *region;

		/*
		 * NB: libnvdimm copies the data from ndr_desc into its own
		 * structures so passing a stack pointer is fine.
		 */
		memset(&ndr_desc, 0, sizeof(ndr_desc));
		ndr_desc.attr_groups = region_attr_groups;
		ndr_desc.numa_node = of_node_to_nid(np);
		ndr_desc.res = &pdev->resource[i];
		ndr_desc.of_node = np;
		set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);

		if (is_volatile)
			region = nvdimm_volatile_region_create(bus, &ndr_desc);
		else
			region = nvdimm_pmem_region_create(bus, &ndr_desc);

		if (!region)
			dev_warn(&pdev->dev, "Unable to register region %pR from %pOF\n",
					ndr_desc.res, np);
		else
			dev_dbg(&pdev->dev, "Registered region %pR from %pOF\n",
					ndr_desc.res, np);
	}

	return 0;
}

static int of_pmem_region_remove(struct platform_device *pdev)
{
	struct of_pmem_private *priv = platform_get_drvdata(pdev);

	nvdimm_bus_unregister(priv->bus);
	kfree(priv);

	return 0;
}

static const struct of_device_id of_pmem_region_match[] = {
	{ .compatible = "pmem-region" },
	{ },
};

static struct platform_driver of_pmem_region_driver = {
	.probe = of_pmem_region_probe,
	.remove = of_pmem_region_remove,
	.driver = {
		.name = "of_pmem",
		.owner = THIS_MODULE,
		.of_match_table = of_pmem_region_match,
	},
};

module_platform_driver(of_pmem_region_driver);
MODULE_DEVICE_TABLE(of, of_pmem_region_match);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");

@ -27,7 +27,7 @@ static void nd_pfn_release(struct device *dev)
|
|||
struct nd_region *nd_region = to_nd_region(dev->parent);
|
||||
struct nd_pfn *nd_pfn = to_nd_pfn(dev);
|
||||
|
||||
dev_dbg(dev, "%s\n", __func__);
|
||||
dev_dbg(dev, "trace\n");
|
||||
nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns);
|
||||
ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id);
|
||||
kfree(nd_pfn->uuid);
|
||||
|
@ -94,8 +94,8 @@ static ssize_t mode_store(struct device *dev,
|
|||
else
|
||||
rc = -EINVAL;
|
||||
}
|
||||
dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
|
||||
rc, buf, buf[len - 1] == '\n' ? "" : "\n");
|
||||
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
|
||||
buf[len - 1] == '\n' ? "" : "\n");
|
||||
nvdimm_bus_unlock(dev);
|
||||
device_unlock(dev);
|
||||
|
||||
|
@ -144,8 +144,8 @@ static ssize_t align_store(struct device *dev,
|
|||
nvdimm_bus_lock(dev);
|
||||
rc = nd_size_select_store(dev, buf, &nd_pfn->align,
|
||||
nd_pfn_supported_alignments());
|
||||
dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
|
||||
rc, buf, buf[len - 1] == '\n' ? "" : "\n");
|
||||
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
|
||||
buf[len - 1] == '\n' ? "" : "\n");
|
||||
nvdimm_bus_unlock(dev);
|
||||
device_unlock(dev);
|
||||
|
||||
|
@ -171,8 +171,8 @@ static ssize_t uuid_store(struct device *dev,
|
|||
|
||||
device_lock(dev);
|
||||
rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
|
||||
dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
|
||||
rc, buf, buf[len - 1] == '\n' ? "" : "\n");
|
||||
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
|
||||
buf[len - 1] == '\n' ? "" : "\n");
|
||||
device_unlock(dev);
|
||||
|
||||
return rc ? rc : len;
|
||||
|
@ -201,8 +201,8 @@ static ssize_t namespace_store(struct device *dev,
|
|||
device_lock(dev);
|
||||
nvdimm_bus_lock(dev);
|
||||
rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
|
||||
dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
|
||||
rc, buf, buf[len - 1] == '\n' ? "" : "\n");
|
||||
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
|
||||
buf[len - 1] == '\n' ? "" : "\n");
|
||||
nvdimm_bus_unlock(dev);
|
||||
device_unlock(dev);
|
||||
|
||||
|
@ -314,8 +314,8 @@ struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
|
|||
dev = &nd_pfn->dev;
|
||||
device_initialize(&nd_pfn->dev);
|
||||
if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
|
||||
dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n",
|
||||
__func__, dev_name(ndns->claim));
|
||||
dev_dbg(&ndns->dev, "failed, already claimed by %s\n",
|
||||
dev_name(ndns->claim));
|
||||
put_device(dev);
|
||||
return NULL;
|
||||
}
|
||||
|
@ -510,8 +510,7 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
|
|||
nd_pfn = to_nd_pfn(pfn_dev);
|
||||
nd_pfn->pfn_sb = pfn_sb;
|
||||
rc = nd_pfn_validate(nd_pfn, PFN_SIG);
|
||||
dev_dbg(dev, "%s: pfn: %s\n", __func__,
|
||||
rc == 0 ? dev_name(pfn_dev) : "<none>");
|
||||
dev_dbg(dev, "pfn: %s\n", rc == 0 ? dev_name(pfn_dev) : "<none>");
|
||||
if (rc < 0) {
|
||||
nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
|
||||
put_device(pfn_dev);
|
||||
|
|
|
@ -66,7 +66,7 @@ static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
|
|||
rc = BLK_STS_IOERR;
|
||||
if (cleared > 0 && cleared / 512) {
|
||||
cleared /= 512;
|
||||
dev_dbg(dev, "%s: %#llx clear %ld sector%s\n", __func__,
|
||||
dev_dbg(dev, "%#llx clear %ld sector%s\n",
|
||||
(unsigned long long) sector, cleared,
|
||||
cleared > 1 ? "s" : "");
|
||||
badblocks_clear(&pmem->bb, sector, cleared);
|
||||
|
@ -547,17 +547,7 @@ static struct nd_device_driver nd_pmem_driver = {
|
|||
.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
|
||||
};
|
||||
|
||||
static int __init pmem_init(void)
|
||||
{
|
||||
return nd_driver_register(&nd_pmem_driver);
|
||||
}
|
||||
module_init(pmem_init);
|
||||
|
||||
static void pmem_exit(void)
|
||||
{
|
||||
driver_unregister(&nd_pmem_driver.drv);
|
||||
}
|
||||
module_exit(pmem_exit);
|
||||
module_nd_driver(nd_pmem_driver);
|
||||
|
||||
MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
|
|
|
@ -27,10 +27,10 @@ static int nd_region_probe(struct device *dev)
|
|||
if (nd_region->num_lanes > num_online_cpus()
|
||||
&& nd_region->num_lanes < num_possible_cpus()
|
||||
&& !test_and_set_bit(0, &once)) {
|
||||
dev_info(dev, "online cpus (%d) < concurrent i/o lanes (%d) < possible cpus (%d)\n",
|
||||
dev_dbg(dev, "online cpus (%d) < concurrent i/o lanes (%d) < possible cpus (%d)\n",
|
||||
num_online_cpus(), nd_region->num_lanes,
|
||||
num_possible_cpus());
|
||||
dev_info(dev, "setting nr_cpus=%d may yield better libnvdimm device performance\n",
|
||||
dev_dbg(dev, "setting nr_cpus=%d may yield better libnvdimm device performance\n",
|
||||
nd_region->num_lanes);
|
||||
}
|
||||
|
||||
|
|
|
@@ -182,6 +182,14 @@ struct nd_region *to_nd_region(struct device *dev)
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct device *nd_region_dev(struct nd_region *nd_region)
{
	if (!nd_region)
		return NULL;
	return &nd_region->dev;
}
EXPORT_SYMBOL_GPL(nd_region_dev);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

@@ -1014,6 +1022,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	dev->of_node = ndr_desc->of_node;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

@@ -76,12 +76,14 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc);

struct device_node;
struct nvdimm_bus_descriptor {
	const struct attribute_group **attr_groups;
	unsigned long bus_dsm_mask;
	unsigned long cmd_mask;
	struct module *module;
	char *provider_name;
	struct device_node *of_node;
	ndctl_fn ndctl;
	int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc);
	int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc,

@@ -123,6 +125,7 @@ struct nd_region_desc {
	int num_lanes;
	int numa_node;
	unsigned long flags;
	struct device_node *of_node;
};

struct device;

@@ -164,6 +167,7 @@ void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus);
struct nvdimm_bus *to_nvdimm_bus(struct device *dev);
struct nvdimm *to_nvdimm(struct device *dev);
struct nd_region *to_nd_region(struct device *dev);
struct device *nd_region_dev(struct nd_region *nd_region);
struct nd_blk_region *to_nd_blk_region(struct device *dev);
struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus);
struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus);

@@ -180,6 +180,12 @@ struct nd_region;
void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event);
int __must_check __nd_driver_register(struct nd_device_driver *nd_drv,
		struct module *module, const char *mod_name);
static inline void nd_driver_unregister(struct nd_device_driver *drv)
{
	driver_unregister(&drv->drv);
}
#define nd_driver_register(driver) \
	__nd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
#define module_nd_driver(driver) \
	module_driver(driver, nd_driver_register, nd_driver_unregister)
#endif /* __LINUX_ND_H__ */

@ -104,7 +104,8 @@ enum {
|
|||
NUM_HINTS = 8,
|
||||
NUM_BDW = NUM_DCR,
|
||||
NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
|
||||
NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */ + 4 /* spa1 iset */,
|
||||
NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */
|
||||
+ 4 /* spa1 iset */ + 1 /* spa11 iset */,
|
||||
DIMM_SIZE = SZ_32M,
|
||||
LABEL_SIZE = SZ_128K,
|
||||
SPA_VCD_SIZE = SZ_4M,
|
||||
|
@ -153,6 +154,7 @@ struct nfit_test {
|
|||
void *nfit_buf;
|
||||
dma_addr_t nfit_dma;
|
||||
size_t nfit_size;
|
||||
size_t nfit_filled;
|
||||
int dcr_idx;
|
||||
int num_dcr;
|
||||
int num_pm;
|
||||
|
@ -709,7 +711,9 @@ static void smart_notify(struct device *bus_dev,
|
|||
>= thresh->media_temperature)
|
||||
|| ((thresh->alarm_control & ND_INTEL_SMART_CTEMP_TRIP)
|
||||
&& smart->ctrl_temperature
|
||||
>= thresh->ctrl_temperature)) {
|
||||
>= thresh->ctrl_temperature)
|
||||
|| (smart->health != ND_INTEL_SMART_NON_CRITICAL_HEALTH)
|
||||
|| (smart->shutdown_state != 0)) {
|
||||
device_lock(bus_dev);
|
||||
__acpi_nvdimm_notify(dimm_dev, 0x81);
|
||||
device_unlock(bus_dev);
|
||||
|
@ -735,6 +739,32 @@ static int nfit_test_cmd_smart_set_threshold(
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int nfit_test_cmd_smart_inject(
|
||||
struct nd_intel_smart_inject *inj,
|
||||
unsigned int buf_len,
|
||||
struct nd_intel_smart_threshold *thresh,
|
||||
struct nd_intel_smart *smart,
|
||||
struct device *bus_dev, struct device *dimm_dev)
|
||||
{
|
||||
if (buf_len != sizeof(*inj))
|
||||
return -EINVAL;
|
||||
|
||||
if (inj->mtemp_enable)
|
||||
smart->media_temperature = inj->media_temperature;
|
||||
if (inj->spare_enable)
|
||||
smart->spares = inj->spares;
|
||||
if (inj->fatal_enable)
|
||||
smart->health = ND_INTEL_SMART_FATAL_HEALTH;
|
||||
if (inj->unsafe_shutdown_enable) {
|
||||
smart->shutdown_state = 1;
|
||||
smart->shutdown_count++;
|
||||
}
|
||||
inj->status = 0;
|
||||
smart_notify(bus_dev, dimm_dev, smart, thresh);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void uc_error_notify(struct work_struct *work)
|
||||
{
|
||||
struct nfit_test *t = container_of(work, typeof(*t), work);
|
||||
|
@ -935,6 +965,13 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
|
|||
t->dcr_idx],
|
||||
&t->smart[i - t->dcr_idx],
|
||||
&t->pdev.dev, t->dimm_dev[i]);
|
||||
case ND_INTEL_SMART_INJECT:
|
||||
return nfit_test_cmd_smart_inject(buf,
|
||||
buf_len,
|
||||
&t->smart_threshold[i -
|
||||
t->dcr_idx],
|
||||
&t->smart[i - t->dcr_idx],
|
||||
&t->pdev.dev, t->dimm_dev[i]);
|
||||
default:
|
||||
return -ENOTTY;
|
||||
}
|
||||
|
@ -1222,7 +1259,7 @@ static void smart_init(struct nfit_test *t)
|
|||
| ND_INTEL_SMART_MTEMP_VALID,
|
||||
.health = ND_INTEL_SMART_NON_CRITICAL_HEALTH,
|
||||
.media_temperature = 23 * 16,
|
||||
.ctrl_temperature = 30 * 16,
|
||||
.ctrl_temperature = 25 * 16,
|
||||
.pmic_temperature = 40 * 16,
|
||||
.spares = 75,
|
||||
.alarm_flags = ND_INTEL_SMART_SPARE_TRIP
|
||||
|
@ -1366,7 +1403,7 @@ static void nfit_test0_setup(struct nfit_test *t)
|
|||
struct acpi_nfit_data_region *bdw;
|
||||
struct acpi_nfit_flush_address *flush;
|
||||
struct acpi_nfit_capabilities *pcap;
|
||||
unsigned int offset, i;
|
||||
unsigned int offset = 0, i;
|
||||
|
||||
/*
|
||||
* spa0 (interleave first half of dimm0 and dimm1, note storage
|
||||
|
@ -1380,93 +1417,102 @@ static void nfit_test0_setup(struct nfit_test *t)
|
|||
spa->range_index = 0+1;
|
||||
spa->address = t->spa_set_dma[0];
|
||||
spa->length = SPA0_SIZE;
|
||||
offset += spa->header.length;
|
||||
|
||||
/*
|
||||
* spa1 (interleave last half of the 4 DIMMS, note storage
|
||||
* does not actually alias the related block-data-window
|
||||
* regions)
|
||||
*/
|
||||
spa = nfit_buf + sizeof(*spa);
|
||||
spa = nfit_buf + offset;
|
||||
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
|
||||
spa->header.length = sizeof(*spa);
|
||||
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
|
||||
spa->range_index = 1+1;
|
||||
spa->address = t->spa_set_dma[1];
|
||||
spa->length = SPA1_SIZE;
|
||||
offset += spa->header.length;
|
||||
|
||||
/* spa2 (dcr0) dimm0 */
|
||||
spa = nfit_buf + sizeof(*spa) * 2;
|
||||
spa = nfit_buf + offset;
|
||||
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
|
||||
spa->header.length = sizeof(*spa);
|
||||
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
|
||||
spa->range_index = 2+1;
|
||||
spa->address = t->dcr_dma[0];
|
||||
spa->length = DCR_SIZE;
|
||||
offset += spa->header.length;
|
||||
|
||||
/* spa3 (dcr1) dimm1 */
|
||||
spa = nfit_buf + sizeof(*spa) * 3;
|
||||
spa = nfit_buf + offset;
|
||||
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
|
||||
spa->header.length = sizeof(*spa);
|
||||
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
|
||||
spa->range_index = 3+1;
|
||||
spa->address = t->dcr_dma[1];
|
||||
spa->length = DCR_SIZE;
|
||||
offset += spa->header.length;
|
||||
|
||||
/* spa4 (dcr2) dimm2 */
|
||||
spa = nfit_buf + sizeof(*spa) * 4;
|
||||
spa = nfit_buf + offset;
|
||||
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
|
||||
spa->header.length = sizeof(*spa);
|
||||
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
|
||||
spa->range_index = 4+1;
|
||||
spa->address = t->dcr_dma[2];
|
||||
spa->length = DCR_SIZE;
|
||||
offset += spa->header.length;
|
||||
|
||||
/* spa5 (dcr3) dimm3 */
|
||||
spa = nfit_buf + sizeof(*spa) * 5;
|
||||
spa = nfit_buf + offset;
|
||||
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
|
||||
spa->header.length = sizeof(*spa);
|
||||
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
|
||||
spa->range_index = 5+1;
|
||||
spa->address = t->dcr_dma[3];
|
||||
spa->length = DCR_SIZE;
|
||||
offset += spa->header.length;
|
||||
|
||||
/* spa6 (bdw for dcr0) dimm0 */
|
||||
spa = nfit_buf + sizeof(*spa) * 6;
|
||||
spa = nfit_buf + offset;
|
||||
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
|
||||
spa->header.length = sizeof(*spa);
|
||||
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
|
||||
spa->range_index = 6+1;
|
||||
spa->address = t->dimm_dma[0];
|
||||
spa->length = DIMM_SIZE;
|
||||
offset += spa->header.length;
|
||||
|
||||
/* spa7 (bdw for dcr1) dimm1 */
|
||||
spa = nfit_buf + sizeof(*spa) * 7;
|
||||
spa = nfit_buf + offset;
|
||||
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
|
||||
spa->header.length = sizeof(*spa);
|
||||
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
|
||||
spa->range_index = 7+1;
|
||||
spa->address = t->dimm_dma[1];
|
||||
spa->length = DIMM_SIZE;
|
||||
offset += spa->header.length;
|
||||
|
||||
/* spa8 (bdw for dcr2) dimm2 */
|
||||
spa = nfit_buf + sizeof(*spa) * 8;
|
||||
spa = nfit_buf + offset;
|
||||
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
|
||||
spa->header.length = sizeof(*spa);
|
||||
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
|
||||
spa->range_index = 8+1;
|
||||
spa->address = t->dimm_dma[2];
|
||||
spa->length = DIMM_SIZE;
|
||||
offset += spa->header.length;
|
||||
|
||||
/* spa9 (bdw for dcr3) dimm3 */
|
||||
spa = nfit_buf + sizeof(*spa) * 9;
|
||||
spa = nfit_buf + offset;
|
||||
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
|
||||
spa->header.length = sizeof(*spa);
|
||||
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
|
||||
spa->range_index = 9+1;
|
||||
spa->address = t->dimm_dma[3];
|
||||
spa->length = DIMM_SIZE;
|
||||
offset += spa->header.length;
|
||||
|
||||
offset = sizeof(*spa) * 10;
|
||||
/* mem-region0 (spa0, dimm0) */
|
||||
memdev = nfit_buf + offset;
|
||||
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
|
||||
|
@@ -1481,9 +1527,10 @@ static void nfit_test0_setup(struct nfit_test *t)
memdev->address = 0;
memdev->interleave_index = 0;
memdev->interleave_ways = 2;
offset += memdev->header.length;

/* mem-region1 (spa0, dimm1) */
memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map);
memdev = nfit_buf + offset;
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
memdev->header.length = sizeof(*memdev);
memdev->device_handle = handle[1];
@@ -1497,9 +1544,10 @@ static void nfit_test0_setup(struct nfit_test *t)
memdev->interleave_index = 0;
memdev->interleave_ways = 2;
memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
offset += memdev->header.length;

/* mem-region2 (spa1, dimm0) */
memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 2;
memdev = nfit_buf + offset;
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
memdev->header.length = sizeof(*memdev);
memdev->device_handle = handle[0];
@@ -1513,9 +1561,10 @@ static void nfit_test0_setup(struct nfit_test *t)
memdev->interleave_index = 0;
memdev->interleave_ways = 4;
memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
offset += memdev->header.length;

/* mem-region3 (spa1, dimm1) */
memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 3;
memdev = nfit_buf + offset;
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
memdev->header.length = sizeof(*memdev);
memdev->device_handle = handle[1];
@@ -1528,9 +1577,10 @@ static void nfit_test0_setup(struct nfit_test *t)
memdev->address = SPA0_SIZE/2;
memdev->interleave_index = 0;
memdev->interleave_ways = 4;
offset += memdev->header.length;

/* mem-region4 (spa1, dimm2) */
memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 4;
memdev = nfit_buf + offset;
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
memdev->header.length = sizeof(*memdev);
memdev->device_handle = handle[2];
@@ -1544,9 +1594,10 @@ static void nfit_test0_setup(struct nfit_test *t)
memdev->interleave_index = 0;
memdev->interleave_ways = 4;
memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
offset += memdev->header.length;

/* mem-region5 (spa1, dimm3) */
memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 5;
memdev = nfit_buf + offset;
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
memdev->header.length = sizeof(*memdev);
memdev->device_handle = handle[3];
@@ -1559,9 +1610,10 @@ static void nfit_test0_setup(struct nfit_test *t)
memdev->address = SPA0_SIZE/2;
memdev->interleave_index = 0;
memdev->interleave_ways = 4;
offset += memdev->header.length;

/* mem-region6 (spa/dcr0, dimm0) */
memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 6;
memdev = nfit_buf + offset;
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
memdev->header.length = sizeof(*memdev);
memdev->device_handle = handle[0];
@@ -1574,9 +1626,10 @@ static void nfit_test0_setup(struct nfit_test *t)
memdev->address = 0;
memdev->interleave_index = 0;
memdev->interleave_ways = 1;
offset += memdev->header.length;

/* mem-region7 (spa/dcr1, dimm1) */
memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 7;
memdev = nfit_buf + offset;
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
memdev->header.length = sizeof(*memdev);
memdev->device_handle = handle[1];
@@ -1589,9 +1642,10 @@ static void nfit_test0_setup(struct nfit_test *t)
memdev->address = 0;
memdev->interleave_index = 0;
memdev->interleave_ways = 1;
offset += memdev->header.length;

/* mem-region8 (spa/dcr2, dimm2) */
memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 8;
memdev = nfit_buf + offset;
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
memdev->header.length = sizeof(*memdev);
memdev->device_handle = handle[2];
@@ -1604,9 +1658,10 @@ static void nfit_test0_setup(struct nfit_test *t)
memdev->address = 0;
memdev->interleave_index = 0;
memdev->interleave_ways = 1;
offset += memdev->header.length;

/* mem-region9 (spa/dcr3, dimm3) */
memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 9;
memdev = nfit_buf + offset;
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
memdev->header.length = sizeof(*memdev);
memdev->device_handle = handle[3];
@@ -1619,9 +1674,10 @@ static void nfit_test0_setup(struct nfit_test *t)
memdev->address = 0;
memdev->interleave_index = 0;
memdev->interleave_ways = 1;
offset += memdev->header.length;

/* mem-region10 (spa/bdw0, dimm0) */
memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 10;
memdev = nfit_buf + offset;
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
memdev->header.length = sizeof(*memdev);
memdev->device_handle = handle[0];
@@ -1634,9 +1690,10 @@ static void nfit_test0_setup(struct nfit_test *t)
memdev->address = 0;
memdev->interleave_index = 0;
memdev->interleave_ways = 1;
offset += memdev->header.length;

/* mem-region11 (spa/bdw1, dimm1) */
memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 11;
memdev = nfit_buf + offset;
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
memdev->header.length = sizeof(*memdev);
memdev->device_handle = handle[1];
@@ -1649,9 +1706,10 @@ static void nfit_test0_setup(struct nfit_test *t)
memdev->address = 0;
memdev->interleave_index = 0;
memdev->interleave_ways = 1;
offset += memdev->header.length;

/* mem-region12 (spa/bdw2, dimm2) */
memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 12;
memdev = nfit_buf + offset;
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
memdev->header.length = sizeof(*memdev);
memdev->device_handle = handle[2];
@@ -1664,9 +1722,10 @@ static void nfit_test0_setup(struct nfit_test *t)
memdev->address = 0;
memdev->interleave_index = 0;
memdev->interleave_ways = 1;
offset += memdev->header.length;

/* mem-region13 (spa/dcr3, dimm3) */
memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 13;
memdev = nfit_buf + offset;
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
memdev->header.length = sizeof(*memdev);
memdev->device_handle = handle[3];
@@ -1680,12 +1739,12 @@ static void nfit_test0_setup(struct nfit_test *t)
memdev->interleave_index = 0;
memdev->interleave_ways = 1;
memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
offset += memdev->header.length;

offset = offset + sizeof(struct acpi_nfit_memory_map) * 14;
/* dcr-descriptor0: blk */
dcr = nfit_buf + offset;
dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
dcr->header.length = sizeof(struct acpi_nfit_control_region);
dcr->header.length = sizeof(*dcr);
dcr->region_index = 0+1;
dcr_common_init(dcr);
dcr->serial_number = ~handle[0];
@@ -1696,11 +1755,12 @@ static void nfit_test0_setup(struct nfit_test *t)
dcr->command_size = 8;
dcr->status_offset = 8;
dcr->status_size = 4;
offset += dcr->header.length;

/* dcr-descriptor1: blk */
dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region);
dcr = nfit_buf + offset;
dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
dcr->header.length = sizeof(struct acpi_nfit_control_region);
dcr->header.length = sizeof(*dcr);
dcr->region_index = 1+1;
dcr_common_init(dcr);
dcr->serial_number = ~handle[1];
@@ -1711,11 +1771,12 @@ static void nfit_test0_setup(struct nfit_test *t)
dcr->command_size = 8;
dcr->status_offset = 8;
dcr->status_size = 4;
offset += dcr->header.length;

/* dcr-descriptor2: blk */
dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 2;
dcr = nfit_buf + offset;
dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
dcr->header.length = sizeof(struct acpi_nfit_control_region);
dcr->header.length = sizeof(*dcr);
dcr->region_index = 2+1;
dcr_common_init(dcr);
dcr->serial_number = ~handle[2];
@@ -1726,11 +1787,12 @@ static void nfit_test0_setup(struct nfit_test *t)
dcr->command_size = 8;
dcr->status_offset = 8;
dcr->status_size = 4;
offset += dcr->header.length;

/* dcr-descriptor3: blk */
dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 3;
dcr = nfit_buf + offset;
dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
dcr->header.length = sizeof(struct acpi_nfit_control_region);
dcr->header.length = sizeof(*dcr);
dcr->region_index = 3+1;
dcr_common_init(dcr);
dcr->serial_number = ~handle[3];
@@ -1741,8 +1803,8 @@ static void nfit_test0_setup(struct nfit_test *t)
dcr->command_size = 8;
dcr->status_offset = 8;
dcr->status_size = 4;
offset += dcr->header.length;

offset = offset + sizeof(struct acpi_nfit_control_region) * 4;
/* dcr-descriptor0: pmem */
dcr = nfit_buf + offset;
dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
@@ -1753,10 +1815,10 @@ static void nfit_test0_setup(struct nfit_test *t)
dcr->serial_number = ~handle[0];
dcr->code = NFIT_FIC_BYTEN;
dcr->windows = 0;
offset += dcr->header.length;

/* dcr-descriptor1: pmem */
dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
        window_size);
dcr = nfit_buf + offset;
dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
dcr->header.length = offsetof(struct acpi_nfit_control_region,
        window_size);
@@ -1765,10 +1827,10 @@ static void nfit_test0_setup(struct nfit_test *t)
dcr->serial_number = ~handle[1];
dcr->code = NFIT_FIC_BYTEN;
dcr->windows = 0;
offset += dcr->header.length;

/* dcr-descriptor2: pmem */
dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
        window_size) * 2;
dcr = nfit_buf + offset;
dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
dcr->header.length = offsetof(struct acpi_nfit_control_region,
        window_size);
@@ -1777,10 +1839,10 @@ static void nfit_test0_setup(struct nfit_test *t)
dcr->serial_number = ~handle[2];
dcr->code = NFIT_FIC_BYTEN;
dcr->windows = 0;
offset += dcr->header.length;

/* dcr-descriptor3: pmem */
dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
        window_size) * 3;
dcr = nfit_buf + offset;
dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
dcr->header.length = offsetof(struct acpi_nfit_control_region,
        window_size);
@@ -1789,54 +1851,56 @@ static void nfit_test0_setup(struct nfit_test *t)
dcr->serial_number = ~handle[3];
dcr->code = NFIT_FIC_BYTEN;
dcr->windows = 0;
offset += dcr->header.length;

offset = offset + offsetof(struct acpi_nfit_control_region,
        window_size) * 4;
/* bdw0 (spa/dcr0, dimm0) */
bdw = nfit_buf + offset;
bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
bdw->header.length = sizeof(struct acpi_nfit_data_region);
bdw->header.length = sizeof(*bdw);
bdw->region_index = 0+1;
bdw->windows = 1;
bdw->offset = 0;
bdw->size = BDW_SIZE;
bdw->capacity = DIMM_SIZE;
bdw->start_address = 0;
offset += bdw->header.length;

/* bdw1 (spa/dcr1, dimm1) */
bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region);
bdw = nfit_buf + offset;
bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
bdw->header.length = sizeof(struct acpi_nfit_data_region);
bdw->header.length = sizeof(*bdw);
bdw->region_index = 1+1;
bdw->windows = 1;
bdw->offset = 0;
bdw->size = BDW_SIZE;
bdw->capacity = DIMM_SIZE;
bdw->start_address = 0;
offset += bdw->header.length;

/* bdw2 (spa/dcr2, dimm2) */
bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 2;
bdw = nfit_buf + offset;
bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
bdw->header.length = sizeof(struct acpi_nfit_data_region);
bdw->header.length = sizeof(*bdw);
bdw->region_index = 2+1;
bdw->windows = 1;
bdw->offset = 0;
bdw->size = BDW_SIZE;
bdw->capacity = DIMM_SIZE;
bdw->start_address = 0;
offset += bdw->header.length;

/* bdw3 (spa/dcr3, dimm3) */
bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 3;
bdw = nfit_buf + offset;
bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
bdw->header.length = sizeof(struct acpi_nfit_data_region);
bdw->header.length = sizeof(*bdw);
bdw->region_index = 3+1;
bdw->windows = 1;
bdw->offset = 0;
bdw->size = BDW_SIZE;
bdw->capacity = DIMM_SIZE;
bdw->start_address = 0;
offset += bdw->header.length;

offset = offset + sizeof(struct acpi_nfit_data_region) * 4;
/* flush0 (dimm0) */
flush = nfit_buf + offset;
flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
@@ -1845,48 +1909,52 @@ static void nfit_test0_setup(struct nfit_test *t)
flush->hint_count = NUM_HINTS;
for (i = 0; i < NUM_HINTS; i++)
        flush->hint_address[i] = t->flush_dma[0] + i * sizeof(u64);
offset += flush->header.length;

/* flush1 (dimm1) */
flush = nfit_buf + offset + flush_hint_size * 1;
flush = nfit_buf + offset;
flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
flush->header.length = flush_hint_size;
flush->device_handle = handle[1];
flush->hint_count = NUM_HINTS;
for (i = 0; i < NUM_HINTS; i++)
        flush->hint_address[i] = t->flush_dma[1] + i * sizeof(u64);
offset += flush->header.length;

/* flush2 (dimm2) */
flush = nfit_buf + offset + flush_hint_size * 2;
flush = nfit_buf + offset;
flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
flush->header.length = flush_hint_size;
flush->device_handle = handle[2];
flush->hint_count = NUM_HINTS;
for (i = 0; i < NUM_HINTS; i++)
        flush->hint_address[i] = t->flush_dma[2] + i * sizeof(u64);
offset += flush->header.length;

/* flush3 (dimm3) */
flush = nfit_buf + offset + flush_hint_size * 3;
flush = nfit_buf + offset;
flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
flush->header.length = flush_hint_size;
flush->device_handle = handle[3];
flush->hint_count = NUM_HINTS;
for (i = 0; i < NUM_HINTS; i++)
        flush->hint_address[i] = t->flush_dma[3] + i * sizeof(u64);
offset += flush->header.length;

/* platform capabilities */
pcap = nfit_buf + offset + flush_hint_size * 4;
pcap = nfit_buf + offset;
pcap->header.type = ACPI_NFIT_TYPE_CAPABILITIES;
pcap->header.length = sizeof(*pcap);
pcap->highest_capability = 1;
pcap->capabilities = ACPI_NFIT_CAPABILITY_CACHE_FLUSH |
        ACPI_NFIT_CAPABILITY_MEM_FLUSH;
offset += pcap->header.length;

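flush_hint_size above is the one entry size in this table that is not a plain sizeof: a flush-hint descriptor carries a run of trailing 64-bit hint addresses, so its length grows with the hint count and that grown length is what must land in header.length and be added to offset. Below is a hedged sketch of sizing such a trailing-array record; flush_rec, flush_rec_size(), and the hint count of 8 are illustrative stand-ins, not the ACPI definitions or the fixture's actual NUM_HINTS.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for a flush-hint entry with a trailing array. */
struct flush_rec {
        uint16_t type;
        uint16_t length;                /* total size of this entry in bytes */
        uint32_t device_handle;
        uint16_t hint_count;
        uint64_t hint_address[];        /* hint_count addresses follow */
};

static size_t flush_rec_size(unsigned int nr_hints)
{
        /* fixed header plus one 64-bit hint address per hint */
        return sizeof(struct flush_rec) + nr_hints * sizeof(uint64_t);
}

int main(void)
{
        /* e.g. an 8-hint entry, analogous to header.length = flush_hint_size */
        printf("entry size for 8 hints: %zu bytes\n", flush_rec_size(8));
        return 0;
}
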
if (t->setup_hotplug) {
offset = offset + flush_hint_size * 4 + sizeof(*pcap);
/* dcr-descriptor4: blk */
dcr = nfit_buf + offset;
dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
dcr->header.length = sizeof(struct acpi_nfit_control_region);
dcr->header.length = sizeof(*dcr);
dcr->region_index = 8+1;
dcr_common_init(dcr);
dcr->serial_number = ~handle[4];
@@ -1897,8 +1965,8 @@ static void nfit_test0_setup(struct nfit_test *t)
dcr->command_size = 8;
dcr->status_offset = 8;
dcr->status_size = 4;
offset += dcr->header.length;

offset = offset + sizeof(struct acpi_nfit_control_region);
/* dcr-descriptor4: pmem */
dcr = nfit_buf + offset;
dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
@@ -1909,21 +1977,20 @@ static void nfit_test0_setup(struct nfit_test *t)
dcr->serial_number = ~handle[4];
dcr->code = NFIT_FIC_BYTEN;
dcr->windows = 0;
offset += dcr->header.length;

offset = offset + offsetof(struct acpi_nfit_control_region,
        window_size);
/* bdw4 (spa/dcr4, dimm4) */
bdw = nfit_buf + offset;
bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
bdw->header.length = sizeof(struct acpi_nfit_data_region);
bdw->header.length = sizeof(*bdw);
bdw->region_index = 8+1;
bdw->windows = 1;
bdw->offset = 0;
bdw->size = BDW_SIZE;
bdw->capacity = DIMM_SIZE;
bdw->start_address = 0;
offset += bdw->header.length;

offset = offset + sizeof(struct acpi_nfit_data_region);
/* spa10 (dcr4) dimm4 */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
@@ -1932,30 +1999,32 @@ static void nfit_test0_setup(struct nfit_test *t)
spa->range_index = 10+1;
spa->address = t->dcr_dma[4];
spa->length = DCR_SIZE;
offset += spa->header.length;

/*
 * spa11 (single-dimm interleave for hotplug, note storage
 * does not actually alias the related block-data-window
 * regions)
 */
spa = nfit_buf + offset + sizeof(*spa);
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
spa->header.length = sizeof(*spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
spa->range_index = 11+1;
spa->address = t->spa_set_dma[2];
spa->length = SPA0_SIZE;
offset += spa->header.length;

/* spa12 (bdw for dcr4) dimm4 */
spa = nfit_buf + offset + sizeof(*spa) * 2;
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
spa->header.length = sizeof(*spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
spa->range_index = 12+1;
spa->address = t->dimm_dma[4];
spa->length = DIMM_SIZE;
offset += spa->header.length;

offset = offset + sizeof(*spa) * 3;
/* mem-region14 (spa/dcr4, dimm4) */
memdev = nfit_buf + offset;
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
@@ -1970,10 +2039,10 @@ static void nfit_test0_setup(struct nfit_test *t)
memdev->address = 0;
memdev->interleave_index = 0;
memdev->interleave_ways = 1;
offset += memdev->header.length;

/* mem-region15 (spa0, dimm4) */
memdev = nfit_buf + offset +
        sizeof(struct acpi_nfit_memory_map);
/* mem-region15 (spa11, dimm4) */
memdev = nfit_buf + offset;
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
memdev->header.length = sizeof(*memdev);
memdev->device_handle = handle[4];
@@ -1987,10 +2056,10 @@ static void nfit_test0_setup(struct nfit_test *t)
memdev->interleave_index = 0;
memdev->interleave_ways = 1;
memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
offset += memdev->header.length;

/* mem-region16 (spa/bdw4, dimm4) */
memdev = nfit_buf + offset +
        sizeof(struct acpi_nfit_memory_map) * 2;
memdev = nfit_buf + offset;
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
memdev->header.length = sizeof(*memdev);
memdev->device_handle = handle[4];
@@ -2003,8 +2072,8 @@ static void nfit_test0_setup(struct nfit_test *t)
memdev->address = 0;
memdev->interleave_index = 0;
memdev->interleave_ways = 1;
offset += memdev->header.length;

offset = offset + sizeof(struct acpi_nfit_memory_map) * 3;
/* flush3 (dimm4) */
flush = nfit_buf + offset;
flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
@@ -2014,8 +2083,14 @@ static void nfit_test0_setup(struct nfit_test *t)
for (i = 0; i < NUM_HINTS; i++)
        flush->hint_address[i] = t->flush_dma[4]
                + i * sizeof(u64);
offset += flush->header.length;

/* sanity check to make sure we've filled the buffer */
WARN_ON(offset != t->nfit_size);
}

t->nfit_filled = offset;

post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
        SPA0_SIZE);

@@ -2026,6 +2101,7 @@ static void nfit_test0_setup(struct nfit_test *t)
set_bit(ND_INTEL_SMART, &acpi_desc->dimm_cmd_force_en);
set_bit(ND_INTEL_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
set_bit(ND_INTEL_SMART_SET_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
set_bit(ND_INTEL_SMART_INJECT, &acpi_desc->dimm_cmd_force_en);
set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
@@ -2061,17 +2137,18 @@ static void nfit_test1_setup(struct nfit_test *t)
spa->range_index = 0+1;
spa->address = t->spa_set_dma[0];
spa->length = SPA2_SIZE;
offset += spa->header.length;

/* virtual cd region */
spa = nfit_buf + sizeof(*spa);
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
spa->header.length = sizeof(*spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_VCD), 16);
spa->range_index = 0;
spa->address = t->spa_set_dma[1];
spa->length = SPA_VCD_SIZE;
offset += spa->header.length;

offset += sizeof(*spa) * 2;
/* mem-region0 (spa0, dimm0) */
memdev = nfit_buf + offset;
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
@@ -2089,8 +2166,8 @@ static void nfit_test1_setup(struct nfit_test *t)
memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
        | ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
        | ACPI_NFIT_MEM_NOT_ARMED;
offset += memdev->header.length;

offset += sizeof(*memdev);
/* dcr-descriptor0 */
dcr = nfit_buf + offset;
dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
@@ -2101,8 +2178,8 @@ static void nfit_test1_setup(struct nfit_test *t)
dcr->serial_number = ~handle[5];
dcr->code = NFIT_FIC_BYTE;
dcr->windows = 0;

offset += dcr->header.length;

memdev = nfit_buf + offset;
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
memdev->header.length = sizeof(*memdev);
@@ -2117,9 +2194,9 @@ static void nfit_test1_setup(struct nfit_test *t)
memdev->interleave_index = 0;
memdev->interleave_ways = 1;
memdev->flags = ACPI_NFIT_MEM_MAP_FAILED;
offset += memdev->header.length;

/* dcr-descriptor1 */
offset += sizeof(*memdev);
dcr = nfit_buf + offset;
dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
dcr->header.length = offsetof(struct acpi_nfit_control_region,
@@ -2129,6 +2206,12 @@ static void nfit_test1_setup(struct nfit_test *t)
dcr->serial_number = ~handle[6];
dcr->code = NFIT_FIC_BYTE;
dcr->windows = 0;
offset += dcr->header.length;

/* sanity check to make sure we've filled the buffer */
WARN_ON(offset != t->nfit_size);

t->nfit_filled = offset;

post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
        SPA2_SIZE);

@@ -2487,7 +2570,7 @@ static int nfit_test_probe(struct platform_device *pdev)
nd_desc->ndctl = nfit_test_ctl;

rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_buf,
        nfit_test->nfit_size);
        nfit_test->nfit_filled);
if (rc)
        return rc;

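The probe-path change above is the consumer of the new nfit_filled field: rather than handing the parser the full allocation (nfit_size), the test now passes only the number of bytes the setup functions actually wrote. A minimal sketch of that pattern follows, with made-up names (fixture, parse_table) standing in for the nfit_test structures and acpi_nfit_init():

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Made-up fixture: a large allocation that is only partially filled. */
struct fixture {
        uint8_t buf[4096];      /* allocated capacity */
        size_t size;            /* == sizeof(buf) */
        size_t filled;          /* bytes actually written during setup */
};

/* Stand-in for the parser; it should only ever see valid bytes. */
static int parse_table(const uint8_t *buf, size_t len)
{
        (void)buf;
        printf("parsing %zu bytes\n", len);
        return 0;
}

int main(void)
{
        struct fixture f = { .size = sizeof(f.buf) };

        memset(f.buf, 0, sizeof(f.buf));
        f.filled = 192;         /* pretend setup wrote 192 bytes of entries */

        /* Pass the filled length, not the allocated length. */
        return parse_table(f.buf, f.filled);
}
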
@@ -93,6 +93,7 @@ struct nd_cmd_ars_err_inj_stat {
#define ND_INTEL_FW_FINISH_UPDATE 15
#define ND_INTEL_FW_FINISH_QUERY 16
#define ND_INTEL_SMART_SET_THRESHOLD 17
#define ND_INTEL_SMART_INJECT 18

#define ND_INTEL_SMART_HEALTH_VALID (1 << 0)
#define ND_INTEL_SMART_SPARES_VALID (1 << 1)
@@ -111,6 +112,10 @@ struct nd_cmd_ars_err_inj_stat {
#define ND_INTEL_SMART_NON_CRITICAL_HEALTH (1 << 0)
#define ND_INTEL_SMART_CRITICAL_HEALTH (1 << 1)
#define ND_INTEL_SMART_FATAL_HEALTH (1 << 2)
#define ND_INTEL_SMART_INJECT_MTEMP (1 << 0)
#define ND_INTEL_SMART_INJECT_SPARE (1 << 1)
#define ND_INTEL_SMART_INJECT_FATAL (1 << 2)
#define ND_INTEL_SMART_INJECT_SHUTDOWN (1 << 3)

struct nd_intel_smart {
        __u32 status;
@@ -158,6 +163,17 @@ struct nd_intel_smart_set_threshold {
        __u32 status;
} __packed;

struct nd_intel_smart_inject {
        __u64 flags;
        __u8 mtemp_enable;
        __u16 media_temperature;
        __u8 spare_enable;
        __u8 spares;
        __u8 fatal_enable;
        __u8 unsafe_shutdown_enable;
        __u32 status;
} __packed;

#define INTEL_FW_STORAGE_SIZE 0x100000
#define INTEL_FW_MAX_SEND_LEN 0xFFEC
#define INTEL_FW_QUERY_INTERVAL 250000
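
struct nd_intel_smart_inject above is the payload for the new ND_INTEL_SMART_INJECT command: flags selects which of the four ND_INTEL_SMART_INJECT_* conditions to touch, and the per-condition enable/value fields say what to inject. The sketch below is a hedged illustration of filling in that payload; the fill_smart_inject() helper and the field values are hypothetical, and the actual command submission (the ND_CMD_CALL/ioctl plumbing) is not shown.

#include <stdint.h>
#include <string.h>

typedef uint8_t  __u8;
typedef uint16_t __u16;
typedef uint32_t __u32;
typedef uint64_t __u64;

/* Mirrors the definitions added in the hunk above. */
#define ND_INTEL_SMART_INJECT_MTEMP     (1 << 0)
#define ND_INTEL_SMART_INJECT_SPARE     (1 << 1)
#define ND_INTEL_SMART_INJECT_FATAL     (1 << 2)
#define ND_INTEL_SMART_INJECT_SHUTDOWN  (1 << 3)

struct nd_intel_smart_inject {
        __u64 flags;
        __u8 mtemp_enable;
        __u16 media_temperature;
        __u8 spare_enable;
        __u8 spares;
        __u8 fatal_enable;
        __u8 unsafe_shutdown_enable;
        __u32 status;
} __attribute__((packed));

/* Hypothetical helper: request a media-temperature and spare-capacity
 * injection in one payload; a real caller still has to wrap this in the
 * appropriate command envelope and issue it to the DIMM. */
static void fill_smart_inject(struct nd_intel_smart_inject *inj)
{
        memset(inj, 0, sizeof(*inj));
        inj->flags = ND_INTEL_SMART_INJECT_MTEMP | ND_INTEL_SMART_INJECT_SPARE;
        inj->mtemp_enable = 1;
        inj->media_temperature = 40 * 16;       /* illustrative device units */
        inj->spare_enable = 1;
        inj->spares = 5;                        /* pretend 5% spare capacity left */
}

int main(void)
{
        struct nd_intel_smart_inject inj;

        fill_smart_inject(&inj);
        /* status would be written back by the device once a command is issued;
         * here nothing was submitted, so it is still zero. */
        return inj.status;
}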