// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include <acpi/nfit.h>
#include "intel.h"
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set");

static unsigned long override_dsm_mask;
module_param(override_dsm_mask, ulong, S_IRUGO);
MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");

static int default_dsm_family = -1;
module_param(default_dsm_family, int, S_IRUGO);
MODULE_PARM_DESC(default_dsm_family,
		"Try this DSM type first when identifying NVDIMM family");

static bool no_init_ars;
module_param(no_init_ars, bool, 0644);
MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time");

static bool force_labels;
module_param(force_labels, bool, 0444);
MODULE_PARM_DESC(force_labels, "Opt-in to labels despite missing methods");

LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

static struct workqueue_struct *nfit_wq;

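/*
 * Staging lists used when the NFIT is re-parsed (for example after a
 * hotplug notification): sub-tables still present are moved from these
 * "prev" lists back onto the live acpi_desc lists, and anything left
 * over was deleted by the platform firmware.
 */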
struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static guid_t nfit_uuid[NFIT_UUID_MAX];

const guid_t *to_nfit_uuid(enum nfit_uuids id)
{
	return &nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static const guid_t *to_nfit_bus_uuid(int family)
{
	if (WARN_ONCE(family == NVDIMM_BUS_FAMILY_NFIT,
			"only secondary bus families can be translated\n"))
		return NULL;
	/*
	 * The index of bus UUIDs starts immediately following the last
	 * NVDIMM/leaf family.
	 */
	return to_nfit_uuid(family + NVDIMM_FAMILY_MAX);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

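/*
 * Translate the firmware status of a bus-scope command into an errno.
 * The lower 16 bits of 'status' hold the command completion status and
 * the upper 16 bits hold command-specific extended status.
 */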
static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((status >> 16 & flags) == 0)
			return -ENOTTY;
		return 0;
	case ND_CMD_ARS_START:
		/* ARS is in progress */
		if ((status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		return 0;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		/* Check extended status (Upper two bytes) */
		if (status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop.  If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->out_length >= 40 && (ars_status->flags
						& NFIT_ARS_F_OVERFLOW))
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (status >> 16)
			return -EIO;
		return 0;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		return 0;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

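/*
 * Translate per-DIMM command status.  ACPI_LABELS_LOCKED is the status
 * the label methods (_LSI/_LSR/_LSW) return when the label area is
 * locked; report it to the caller as -EACCES.
 */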
#define ACPI_LABELS_LOCKED 3

static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		/*
		 * In the _LSI, _LSR, _LSW case the locked status is
		 * communicated via the read/write commands
		 */
		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
			break;

		if (status >> 16 & ND_CONFIG_LOCKED)
			return -EACCES;
		break;
	case ND_CMD_GET_CONFIG_DATA:
		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
				&& status == ACPI_LABELS_LOCKED)
			return -EACCES;
		break;
	case ND_CMD_SET_CONFIG_DATA:
		if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
				&& status == ACPI_LABELS_LOCKED)
			return -EACCES;
		break;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	if (!nvdimm)
		return xlat_bus_status(buf, cmd, status);
	return xlat_nvdimm_status(nvdimm, buf, cmd, status);
}

/* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */
static union acpi_object *pkg_to_buf(union acpi_object *pkg)
{
	int i;
	void *dst;
	size_t size = 0;
	union acpi_object *buf = NULL;

	if (pkg->type != ACPI_TYPE_PACKAGE) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				pkg->type);
		goto err;
	}

	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER)
			size += 4;
		else if (obj->type == ACPI_TYPE_BUFFER)
			size += obj->buffer.length;
		else {
			WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
					obj->type);
			goto err;
		}
	}

	buf = ACPI_ALLOCATE(sizeof(*buf) + size);
	if (!buf)
		goto err;

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = size;
	buf->buffer.pointer = dst;
	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER) {
			memcpy(dst, &obj->integer.value, 4);
			dst += 4;
		} else if (obj->type == ACPI_TYPE_BUFFER) {
			memcpy(dst, obj->buffer.pointer, obj->buffer.length);
			dst += obj->buffer.length;
		}
	}
 err:
	ACPI_FREE(pkg);
	return buf;
}

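/* wrap the integer returned by _LSW in the buffer object acpi_nfit_ctl expects */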
static union acpi_object *int_to_buf(union acpi_object *integer)
{
	union acpi_object *buf = ACPI_ALLOCATE(sizeof(*buf) + 4);
	void *dst = NULL;

	if (!buf)
		goto err;

	if (integer->type != ACPI_TYPE_INTEGER) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				integer->type);
		goto err;
	}

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = 4;
	buf->buffer.pointer = dst;
	memcpy(dst, &integer->integer.value, 4);
 err:
	ACPI_FREE(integer);
	return buf;
}

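/*
 * Helpers to evaluate the ACPI label methods: _LSW writes a label area
 * region, _LSR reads one, and _LSI reports the label area geometry.
 * Each result is normalized into the ACPI buffer object format that
 * acpi_nfit_ctl() consumes.
 */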
static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset,
		u32 len, void *data)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input = {
		.count = 3,
		.pointer = (union acpi_object []) {
			[0] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = offset,
			},
			[1] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = len,
			},
			[2] = {
				.buffer.type = ACPI_TYPE_BUFFER,
				.buffer.pointer = data,
				.buffer.length = len,
			},
		},
	};

	rc = acpi_evaluate_object(handle, "_LSW", &input, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return int_to_buf(buf.pointer);
}

static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset,
		u32 len)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input = {
		.count = 2,
		.pointer = (union acpi_object []) {
			[0] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = offset,
			},
			[1] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = len,
			},
		},
	};

	rc = acpi_evaluate_object(handle, "_LSR", &input, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(buf.pointer);
}

static union acpi_object *acpi_label_info(acpi_handle handle)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };

	rc = acpi_evaluate_object(handle, "_LSI", NULL, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(buf.pointer);
}

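/*
 * Select the _DSM revision to evaluate for a given family/function
 * pair.  Intel functions NVDIMM_INTEL_GET_MODES through
 * NVDIMM_INTEL_FW_ACTIVATE_ARM require revision 2; everything else
 * defaults to revision 1.
 */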
static u8 nfit_dsm_revid(unsigned family, unsigned func)
{
	static const u8 revid_table[NVDIMM_FAMILY_MAX+1][NVDIMM_CMD_MAX+1] = {
		[NVDIMM_FAMILY_INTEL] = {
			[NVDIMM_INTEL_GET_MODES ...
				NVDIMM_INTEL_FW_ACTIVATE_ARM] = 2,
		},
	};
	u8 id;

	if (family > NVDIMM_FAMILY_MAX)
		return 0;
	if (func > NVDIMM_CMD_MAX)
		return 0;
	id = revid_table[family][func];
	if (id == 0)
		return 1; /* default */
	return id;
}

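/*
 * Intel security commands (get security state through master secure
 * erase) may carry key material, so their payloads are only included
 * in debug hex dumps when CONFIG_NFIT_SECURITY_DEBUG is enabled.
 */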
static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem && nfit_mem->family == NVDIMM_FAMILY_INTEL
			&& func >= NVDIMM_INTEL_GET_SECURITY_STATE
			&& func <= NVDIMM_INTEL_MASTER_SECURE_ERASE)
		return IS_ENABLED(CONFIG_NFIT_SECURITY_DEBUG);
	return true;
}

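/*
 * Translate a libnvdimm command number into the DSM function number to
 * evaluate, validating the nd_cmd_pkg envelope for ND_CMD_CALL and
 * reporting the requested family back through 'family'.
 */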
static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
		struct nd_cmd_pkg *call_pkg, int *family)
{
	if (call_pkg) {
		int i;

		if (nfit_mem && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
			if (call_pkg->nd_reserved2[i])
				return -EINVAL;
		*family = call_pkg->nd_family;
		return call_pkg->nd_command;
	}

	/* In the !call_pkg case, bus commands == bus functions */
	if (!nfit_mem)
		return cmd;

	/* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
	if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
		return cmd;

	/*
	 * Force function number validation to fail since 0 is never
	 * published as a valid function in dsm_mask.
	 */
	return 0;
}

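/*
 * acpi_nfit_ctl() is the single entry point for bus- and DIMM-scope
 * commands: @nvdimm selects the scope (NULL for bus commands), @buf
 * carries the payload in both directions, and the translated firmware
 * status is reported through *@cmd_rc while the return value reports
 * transport errors.  A minimal bus-scope call, sketched under the
 * assumption that 'acpi_desc' is an initialized acpi_nfit_desc:
 *
 *	struct nd_cmd_ars_cap cmd = { .address = 0, .length = SZ_4K };
 *	int cmd_rc;
 *
 *	acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_CAP, &cmd,
 *			sizeof(cmd), &cmd_rc);
 */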
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	u32 offset, fw_status = 0;
	acpi_handle handle;
	const guid_t *guid;
	int func, rc, i;
	int family = 0;

	if (cmd_rc)
		*cmd_rc = -EINVAL;

	if (cmd == ND_CMD_CALL)
		call_pkg = buf;
	func = cmd_to_func(nfit_mem, cmd, call_pkg, &family);
	if (func < 0)
		return func;

	if (nvdimm) {
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		guid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		if (cmd == ND_CMD_CALL && call_pkg->nd_family) {
			family = call_pkg->nd_family;
			if (!test_bit(family, &nd_desc->bus_family_mask))
				return -EINVAL;
			dsm_mask = acpi_desc->family_dsm_mask[family];
			guid = to_nfit_bus_uuid(family);
		} else {
			dsm_mask = acpi_desc->bus_dsm_mask;
			guid = to_nfit_uuid(NFIT_DEV_BUS);
		}
		desc = nd_cmd_bus_desc(cmd);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	/*
	 * Check for a valid command.  For ND_CMD_CALL, we also have to
	 * make sure that the DSM function is supported.
	 */
	if (cmd == ND_CMD_CALL &&
	    (func > NVDIMM_CMD_MAX || !test_bit(func, &dsm_mask)))
		return -ENOTTY;
	else if (!test_bit(cmd, &cmd_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	dev_dbg(dev, "%s cmd: %d: family: %d func: %d input length: %d\n",
		dimm_name, cmd, family, func, in_buf.buffer.length);
	if (payload_dumpable(nvdimm, func))
		print_hex_dump_debug("nvdimm in  ", DUMP_PREFIX_OFFSET, 4, 4,
				in_buf.buffer.pointer,
				min_t(u32, 256, in_buf.buffer.length), true);

	/* call the BIOS, prefer the named methods over _DSM if available */
	if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE
			&& test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
		out_obj = acpi_label_info(handle);
	else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA
			&& test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
		struct nd_cmd_get_config_data_hdr *p = buf;

		out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
	} else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA
			&& test_bit(NFIT_MEM_LSW, &nfit_mem->flags)) {
		struct nd_cmd_set_config_hdr *p = buf;

		out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
				p->in_buf);
	} else {
		u8 revid;

		if (nvdimm)
			revid = nfit_dsm_revid(nfit_mem->family, func);
		else
			revid = 1;
		out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
	}

	if (!out_obj) {
		dev_dbg(dev, "%s _DSM failed cmd: %s\n", dimm_name, cmd_name);
		return -EINVAL;
	}

	if (out_obj->type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
				dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
			cmd_name, out_obj->buffer.length);
	print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
			out_obj->buffer.pointer,
			min_t(u32, 128, out_obj->buffer.length), true);

	if (call_pkg) {
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
			out_obj->buffer.pointer,
			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		ACPI_FREE(out_obj);
		/*
		 * Need to support FW function w/o known size in advance.
		 * Caller can determine required size based upon nd_fw_size.
		 * If we return an error (like elsewhere) then caller wouldn't
		 * be able to rely upon data returned to make calculation.
		 */
		if (cmd_rc)
			*cmd_rc = 0;
		return 0;
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer,
				out_obj->buffer.length - offset);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s output object underflow cmd: %s field: %d\n",
					dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s output overrun cmd: %s field: %d\n",
					dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}

	/*
	 * Set fw_status for all the commands with a known format to be
	 * later interpreted by xlat_status().
	 */
	if (i >= 1 && ((!nvdimm && cmd >= ND_CMD_ARS_CAP
					&& cmd <= ND_CMD_CLEAR_ERROR)
				|| (nvdimm && cmd >= ND_CMD_SMART
					&& cmd <= ND_CMD_VENDOR)))
		fw_status = *(u32 *) out_obj->buffer.pointer;

	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(nvdimm, buf, cmd,
						fw_status);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_ctl);

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid))
			return i;
	return -1;
}

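/*
 * The add_*() helpers below each validate a sub-table's length, dedupe
 * it against the previous parse via the 'prev' staging lists, and
 * otherwise copy it into a devm-allocated tracking object on the
 * corresponding acpi_desc list.
 */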
static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (spa->header.length != sizeof(*spa))
		return false;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
			GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	memcpy(nfit_spa->spa, spa, sizeof(*spa));
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "spa index: %d type: %s\n",
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	if (memdev->header.length != sizeof(*memdev))
		return false;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
			GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
			memdev->device_handle, memdev->range_index,
			memdev->region_index, memdev->flags);
	return true;
}

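/*
 * Look up the SMBIOS physical id for the DIMM with the given NFIT
 * device handle, filling in the memdev flags as a side effect.
 * Returns the physical id, or -ENODEV if no registered bus claims the
 * handle.
 */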
int nfit_get_smbios_id(u32 device_handle, u16 *flags)
{
	struct acpi_nfit_memory_map *memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nfit_mem *nfit_mem;
	u16 physical_id;

	mutex_lock(&acpi_desc_lock);
	list_for_each_entry(acpi_desc, &acpi_descs, list) {
		mutex_lock(&acpi_desc->init_mutex);
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
			memdev = __to_nfit_memdev(nfit_mem);
			if (memdev->device_handle == device_handle) {
				*flags = memdev->flags;
				physical_id = memdev->physical_id;
				mutex_unlock(&acpi_desc->init_mutex);
				mutex_unlock(&acpi_desc_lock);
				return physical_id;
			}
		}
		mutex_unlock(&acpi_desc->init_mutex);
	}
	mutex_unlock(&acpi_desc_lock);

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(nfit_get_smbios_id);

/*
 * An implementation may provide a truncated control region if no block windows
 * are defined.
 */
static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
{
	if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
				window_size))
		return 0;
	if (dcr->windows)
		return sizeof(*dcr);
	return offsetof(struct acpi_nfit_control_region, window_size);
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	if (!sizeof_dcr(dcr))
		return false;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
			GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "dcr index: %d windows: %d\n",
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	if (bdw->header.length != sizeof(*bdw))
		return false;
	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
			GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "bdw dcr: %d windows: %d\n",
			bdw->region_index, bdw->windows);
	return true;
}

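/*
 * Interleave and flush-hint tables are variable length; derive the
 * actual size from the line / hint counts, and treat a table shorter
 * than its base structure as invalid.
 */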
static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
{
	if (idt->header.length < sizeof(*idt))
		return 0;
	return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	if (!sizeof_idt(idt))
		return false;

	list_for_each_entry(nfit_idt, &prev->idts, list) {
		if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
			continue;

		if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}
	}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
			GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "idt index: %d num_lines: %d\n",
			idt->interleave_index, idt->line_count);
	return true;
}

static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
{
	if (flush->header.length < sizeof(*flush))
		return 0;
	return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	if (!sizeof_flush(flush))
		return false;

	list_for_each_entry(nfit_flush, &prev->flushes, list) {
		if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
			continue;

		if (memcmp(nfit_flush->flush, flush,
					sizeof_flush(flush)) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}
	}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
			+ sizeof_flush(flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "nfit_flush handle: %d hint_count: %d\n",
			flush->device_handle, flush->hint_count);
	return true;
}

static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_capabilities *pcap)
{
	struct device *dev = acpi_desc->dev;
	u32 mask;

	mask = (1 << (pcap->highest_capability + 1)) - 1;
	acpi_desc->platform_cap = pcap->capabilities & mask;
	dev_dbg(dev, "cap: %#x\n", acpi_desc->platform_cap);
	return true;
}

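/*
 * Dispatch a single NFIT sub-table by type.  Returns a pointer to the
 * next table, NULL at the end of the NFIT (or on a zero-length table),
 * or ERR_PTR(-ENOMEM) if tracking the table failed.
 */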
static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
			hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "smbios\n");
		break;
	case ACPI_NFIT_TYPE_CAPABILITIES:
		if (!add_platform_cap(acpi_desc, table))
			return err;
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

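/*
 * Find the block-data-window SPA range that maps this DIMM's control
 * region; clear nfit_mem->bdw if none is published.
 */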
static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

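/*
 * Associate the optional block-data-window resources (BDW, its SPA
 * range, memdev mapping, and interleave table) with a DIMM's control
 * region.
 */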
static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}
		break;
	}
}

static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = spa ? nfit_spa_type(spa) : 0;

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		if (spa)
			return 0;
	}

	/*
	 * This loop runs in two modes, when a dimm is mapped the loop
	 * adds memdev associations to an existing dimm, or creates a
	 * dimm. In the unmapped dimm case this loop sweeps for memdev
	 * instances with an invalid / zero range_index and adds those
	 * dimms without spa associations.
	 */
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_flush *nfit_flush;
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (spa && nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		if (!spa && nfit_memdev->memdev->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			nfit_mem->acpi_desc = acpi_desc;
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm.  For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			struct acpi_nfit_flush_address *flush;
			u16 i;

			if (nfit_flush->flush->device_handle != device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			flush = nfit_flush->flush;
treewide: devm_kzalloc() -> devm_kcalloc()
The devm_kzalloc() function has a 2-factor argument form, devm_kcalloc().
This patch replaces cases of:
devm_kzalloc(handle, a * b, gfp)
with:
devm_kcalloc(handle, a * b, gfp)
as well as handling cases of:
devm_kzalloc(handle, a * b * c, gfp)
with:
devm_kzalloc(handle, array3_size(a, b, c), gfp)
as it's slightly less ugly than:
devm_kcalloc(handle, array_size(a, b), c, gfp)
This does, however, attempt to ignore constant size factors like:
devm_kzalloc(handle, 4 * 1024, gfp)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
Some manual whitespace fixes were needed in this patch, as Coccinelle
really liked to write "=devm_kcalloc..." instead of "= devm_kcalloc...".
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
expression HANDLE;
type TYPE;
expression THING, E;
@@
(
devm_kzalloc(HANDLE,
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
devm_kzalloc(HANDLE,
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression HANDLE;
expression COUNT;
typedef u8;
typedef __u8;
@@
(
devm_kzalloc(HANDLE,
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(char) * COUNT
+ COUNT
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
expression HANDLE;
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
expression HANDLE;
identifier SIZE, COUNT;
@@
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression HANDLE;
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
devm_kzalloc(HANDLE,
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression HANDLE;
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
devm_kzalloc(HANDLE,
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
devm_kzalloc(HANDLE,
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
expression HANDLE;
identifier STRIDE, SIZE, COUNT;
@@
(
devm_kzalloc(HANDLE,
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
devm_kzalloc(HANDLE,
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
devm_kzalloc(HANDLE,
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
devm_kzalloc(HANDLE,
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
devm_kzalloc(HANDLE,
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
devm_kzalloc(HANDLE,
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
devm_kzalloc(HANDLE,
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
devm_kzalloc(HANDLE,
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression HANDLE;
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
devm_kzalloc(HANDLE, C1 * C2 * C3, ...)
|
devm_kzalloc(HANDLE,
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
devm_kzalloc(HANDLE,
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
devm_kzalloc(HANDLE,
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
devm_kzalloc(HANDLE,
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression HANDLE;
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
devm_kzalloc(HANDLE, sizeof(THING) * C2, ...)
|
devm_kzalloc(HANDLE, sizeof(TYPE) * C2, ...)
|
devm_kzalloc(HANDLE, C1 * C2 * C3, ...)
|
devm_kzalloc(HANDLE, C1 * C2, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- (E1) * E2
+ E1, E2
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- (E1) * (E2)
+ E1, E2
, ...)
|
- devm_kzalloc
+ devm_kcalloc
(HANDLE,
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
2018-06-13 05:07:58 +08:00
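For illustration, the net effect of these rules on a two-factor allocation looks like the following sketch (names borrowed from this driver for the example; not a literal hunk from the commit):

	/* Before: open-coded multiplication can overflow silently. */
	info = devm_kzalloc(dev, sizeof(struct nfit_set_info_map) * nr, GFP_KERNEL);

	/* After: devm_kcalloc() overflow-checks nr * size and returns NULL on overflow. */
	info = devm_kcalloc(dev, nr, sizeof(struct nfit_set_info_map), GFP_KERNEL);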
|
|
|
nfit_mem->flush_wpq = devm_kcalloc(acpi_desc->dev,
|
|
|
|
flush->hint_count,
|
|
|
|
sizeof(struct resource),
|
|
|
|
GFP_KERNEL);
|
2016-06-08 08:00:04 +08:00
|
|
|
if (!nfit_mem->flush_wpq)
|
|
|
|
return -ENOMEM;
|
|
|
|
for (i = 0; i < flush->hint_count; i++) {
|
|
|
|
struct resource *res = &nfit_mem->flush_wpq[i];
|
|
|
|
|
|
|
|
res->start = flush->hint_address[i];
|
|
|
|
res->end = res->start + 8 - 1;
|
|
|
|
}
|
2016-05-27 02:38:08 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2016-02-05 08:51:00 +08:00
|
|
|
if (dcr && !nfit_mem->dcr) {
|
|
|
|
dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
|
|
|
|
spa->range_index, dcr);
|
|
|
|
return -ENODEV;
|
2015-05-20 10:54:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (type == NFIT_SPA_DCR) {
|
2015-06-25 16:21:02 +08:00
|
|
|
struct nfit_idt *nfit_idt;
|
|
|
|
u16 idt_idx;
|
|
|
|
|
2015-05-20 10:54:31 +08:00
|
|
|
/* multiple dimms may share a SPA when interleaved */
|
|
|
|
nfit_mem->spa_dcr = spa;
|
|
|
|
nfit_mem->memdev_dcr = nfit_memdev->memdev;
|
2015-06-25 16:21:02 +08:00
|
|
|
idt_idx = nfit_memdev->memdev->interleave_index;
|
|
|
|
list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
|
|
|
|
if (nfit_idt->idt->interleave_index != idt_idx)
|
|
|
|
continue;
|
|
|
|
nfit_mem->idt_dcr = nfit_idt->idt;
|
|
|
|
break;
|
|
|
|
}
|
2016-02-05 08:51:00 +08:00
|
|
|
nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
|
2017-04-14 10:46:36 +08:00
|
|
|
} else if (type == NFIT_SPA_PM) {
|
2015-05-20 10:54:31 +08:00
|
|
|
/*
|
|
|
|
* A single dimm may belong to multiple SPA-PM
|
|
|
|
* ranges, record at least one in addition to
|
|
|
|
* any SPA-DCR range.
|
|
|
|
*/
|
|
|
|
nfit_mem->memdev_pmem = nfit_memdev->memdev;
|
2017-04-14 10:46:36 +08:00
|
|
|
} else
|
|
|
|
nfit_mem->memdev_dcr = nfit_memdev->memdev;
|
2015-05-20 10:54:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
|
|
|
|
{
|
|
|
|
struct nfit_mem *a = container_of(_a, typeof(*a), list);
|
|
|
|
struct nfit_mem *b = container_of(_b, typeof(*b), list);
|
|
|
|
u32 handleA, handleB;
|
|
|
|
|
|
|
|
handleA = __to_nfit_memdev(a)->device_handle;
|
|
|
|
handleB = __to_nfit_memdev(b)->device_handle;
|
|
|
|
if (handleA < handleB)
|
|
|
|
return -1;
|
|
|
|
else if (handleA > handleB)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
|
|
|
|
{
|
|
|
|
struct nfit_spa *nfit_spa;
|
2017-04-14 10:46:36 +08:00
|
|
|
int rc;
|
|
|
|
|
2015-05-20 10:54:31 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* For each SPA-DCR or SPA-PMEM address range find its
|
|
|
|
* corresponding MEMDEV(s). From each MEMDEV find the
|
|
|
|
* corresponding DCR. Then, if we're operating on a SPA-DCR,
|
|
|
|
* try to find a SPA-BDW and a corresponding BDW that references
|
|
|
|
* the DCR. Throw it all into an nfit_mem object. Note, that
|
|
|
|
* BDWs are optional.
|
|
|
|
*/
|
|
|
|
list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
|
2017-04-14 10:46:36 +08:00
|
|
|
rc = __nfit_mem_init(acpi_desc, nfit_spa->spa);
|
2015-05-20 10:54:31 +08:00
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2017-04-14 10:46:36 +08:00
|
|
|
/*
|
|
|
|
* If a DIMM has failed to be mapped into SPA there will be no
|
|
|
|
* SPA entries above. Find and register all the unmapped DIMMs
|
|
|
|
* for reporting and recovery purposes.
|
|
|
|
*/
|
|
|
|
rc = __nfit_mem_init(acpi_desc, NULL);
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
|
2015-05-20 10:54:31 +08:00
|
|
|
list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-07-01 11:41:28 +08:00
|
|
|
static ssize_t bus_dsm_mask_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
|
|
|
|
struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
|
2020-07-21 06:07:35 +08:00
|
|
|
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
|
2017-07-01 11:41:28 +08:00
|
|
|
|
2020-07-21 06:07:35 +08:00
|
|
|
return sprintf(buf, "%#lx\n", acpi_desc->bus_dsm_mask);
|
2017-07-01 11:41:28 +08:00
|
|
|
}
|
|
|
|
static struct device_attribute dev_attr_bus_dsm_mask =
|
|
|
|
__ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL);
|
|
|
|
|
2015-04-27 07:26:48 +08:00
|
|
|
static ssize_t revision_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
|
|
|
|
struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
|
|
|
|
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
|
|
|
|
|
2015-11-21 08:05:49 +08:00
|
|
|
return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
|
2015-04-27 07:26:48 +08:00
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RO(revision);
|
|
|
|
|
2016-10-01 07:19:29 +08:00
|
|
|
static ssize_t hw_error_scrub_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
|
|
|
|
struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
|
|
|
|
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
|
|
|
|
|
|
|
|
return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The 'hw_error_scrub' attribute can have the following values written to it:
|
|
|
|
* '0': Switch to the default mode where an exception will only insert
|
|
|
|
* the address of the memory error into the poison and badblocks lists.
|
|
|
|
* '1': Enable a full scrub to happen if an exception for a memory error is
|
|
|
|
* received.
|
|
|
|
*/
|
|
|
|
static ssize_t hw_error_scrub_store(struct device *dev,
|
|
|
|
struct device_attribute *attr, const char *buf, size_t size)
|
|
|
|
{
|
|
|
|
struct nvdimm_bus_descriptor *nd_desc;
|
|
|
|
ssize_t rc;
|
|
|
|
long val;
|
|
|
|
|
|
|
|
rc = kstrtol(buf, 0, &val);
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
|
2019-07-18 09:08:26 +08:00
|
|
|
nfit_device_lock(dev);
|
2016-10-01 07:19:29 +08:00
|
|
|
nd_desc = dev_get_drvdata(dev);
|
|
|
|
if (nd_desc) {
|
|
|
|
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
|
|
|
|
|
|
|
|
switch (val) {
|
|
|
|
case HW_ERROR_SCRUB_ON:
|
|
|
|
acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
|
|
|
|
break;
|
|
|
|
case HW_ERROR_SCRUB_OFF:
|
|
|
|
acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
rc = -EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2019-07-18 09:08:26 +08:00
|
|
|
nfit_device_unlock(dev);
|
2016-10-01 07:19:29 +08:00
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
return size;
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RW(hw_error_scrub);
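A minimal user-space sketch of driving this attribute, assuming a hypothetical sysfs path (the ndbusN instance number varies per system):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Hypothetical path; substitute the actual ndbusN instance. */
		const char *path = "/sys/bus/nd/devices/ndbus0/nfit/hw_error_scrub";
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* "1" == HW_ERROR_SCRUB_ON: run a full ARS after a memory error. */
		if (write(fd, "1", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}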
|
|
|
|
|
2016-07-24 12:51:42 +08:00
|
|
|
/*
|
|
|
|
* This shows the number of full Address Range Scrubs that have been
|
|
|
|
* completed since driver load time. Userspace can wait on this using
|
|
|
|
* select/poll etc. A '+' at the end indicates an ARS is in progress
|
|
|
|
*/
|
|
|
|
static ssize_t scrub_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct nvdimm_bus_descriptor *nd_desc;
|
2019-02-14 01:57:22 +08:00
|
|
|
struct acpi_nfit_desc *acpi_desc;
|
2016-07-24 12:51:42 +08:00
|
|
|
ssize_t rc = -ENXIO;
|
2019-02-14 01:57:22 +08:00
|
|
|
bool busy;
|
2016-07-24 12:51:42 +08:00
|
|
|
|
2019-07-18 09:08:26 +08:00
|
|
|
nfit_device_lock(dev);
|
2016-07-24 12:51:42 +08:00
|
|
|
nd_desc = dev_get_drvdata(dev);
|
2019-02-14 01:57:22 +08:00
|
|
|
if (!nd_desc) {
|
2019-10-18 20:35:34 +08:00
|
|
|
nfit_device_unlock(dev);
|
2019-02-14 01:57:22 +08:00
|
|
|
return rc;
|
2016-07-24 12:51:42 +08:00
|
|
|
}
|
2019-02-14 01:57:22 +08:00
|
|
|
acpi_desc = to_acpi_desc(nd_desc);
|
2016-07-24 12:51:42 +08:00
|
|
|
|
2019-02-14 01:57:22 +08:00
|
|
|
mutex_lock(&acpi_desc->init_mutex);
|
|
|
|
busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags)
|
|
|
|
&& !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
|
|
|
|
rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n");
|
2019-02-14 01:04:07 +08:00
|
|
|
/* Allow an admin to poll the busy state at a higher rate */
|
|
|
|
if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(ARS_POLL,
|
|
|
|
&acpi_desc->scrub_flags)) {
|
|
|
|
acpi_desc->scrub_tmo = 1;
|
|
|
|
mod_delayed_work(nfit_wq, &acpi_desc->dwork, HZ);
|
2016-07-24 12:51:42 +08:00
|
|
|
}
|
2019-02-14 01:04:07 +08:00
|
|
|
|
2019-02-14 01:57:22 +08:00
|
|
|
mutex_unlock(&acpi_desc->init_mutex);
|
2019-07-18 09:08:26 +08:00
|
|
|
nfit_device_unlock(dev);
|
2016-07-24 12:51:42 +08:00
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t scrub_store(struct device *dev,
|
|
|
|
struct device_attribute *attr, const char *buf, size_t size)
|
|
|
|
{
|
|
|
|
struct nvdimm_bus_descriptor *nd_desc;
|
|
|
|
ssize_t rc;
|
|
|
|
long val;
|
|
|
|
|
|
|
|
rc = kstrtol(buf, 0, &val);
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
if (val != 1)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2019-07-18 09:08:26 +08:00
|
|
|
nfit_device_lock(dev);
|
2016-07-24 12:51:42 +08:00
|
|
|
nd_desc = dev_get_drvdata(dev);
|
|
|
|
if (nd_desc) {
|
|
|
|
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
|
|
|
|
|
2018-12-04 02:30:25 +08:00
|
|
|
rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
|
2016-07-24 12:51:42 +08:00
|
|
|
}
|
2019-07-18 09:08:26 +08:00
|
|
|
nfit_device_unlock(dev);
|
2016-07-24 12:51:42 +08:00
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
return size;
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RW(scrub);
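A user-space sketch of consuming this attribute, again assuming a hypothetical sysfs path:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		/* Hypothetical path; substitute the actual ndbusN instance. */
		FILE *f = fopen("/sys/bus/nd/devices/ndbus0/nfit/scrub", "r");
		char buf[32];

		if (!f || !fgets(buf, sizeof(buf), f))
			return 1;
		/* Format is "<count>" or "<count>+" while an ARS is in flight. */
		printf("scrubs completed: %d, in progress: %s\n",
				atoi(buf), strchr(buf, '+') ? "yes" : "no");
		fclose(f);
		return 0;
	}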
|
|
|
|
|
|
|
|
static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
|
|
|
|
{
|
|
|
|
struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
|
|
|
|
const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
|
|
|
|
| 1 << ND_CMD_ARS_STATUS;
|
|
|
|
|
|
|
|
return (nd_desc->cmd_mask & mask) == mask;
|
|
|
|
}
|
|
|
|
|
|
|
|
static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
|
|
|
|
{
|
|
|
|
struct device *dev = container_of(kobj, struct device, kobj);
|
|
|
|
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
|
|
|
|
|
2020-07-21 06:08:24 +08:00
|
|
|
if (a == &dev_attr_scrub.attr)
|
|
|
|
return ars_supported(nvdimm_bus) ? a->mode : 0;
|
|
|
|
|
|
|
|
if (a == &dev_attr_firmware_activate_noidle.attr)
|
|
|
|
return intel_fwa_supported(nvdimm_bus) ? a->mode : 0;
|
|
|
|
|
2016-07-24 12:51:42 +08:00
|
|
|
return a->mode;
|
|
|
|
}
|
|
|
|
|
2015-04-27 07:26:48 +08:00
|
|
|
static struct attribute *acpi_nfit_attributes[] = {
|
|
|
|
&dev_attr_revision.attr,
|
2016-07-24 12:51:42 +08:00
|
|
|
&dev_attr_scrub.attr,
|
2016-10-01 07:19:29 +08:00
|
|
|
&dev_attr_hw_error_scrub.attr,
|
2017-07-01 11:41:28 +08:00
|
|
|
&dev_attr_bus_dsm_mask.attr,
|
2020-07-21 06:08:24 +08:00
|
|
|
&dev_attr_firmware_activate_noidle.attr,
|
2015-04-27 07:26:48 +08:00
|
|
|
NULL,
|
|
|
|
};
|
|
|
|
|
2017-06-22 18:14:41 +08:00
|
|
|
static const struct attribute_group acpi_nfit_attribute_group = {
|
2015-04-27 07:26:48 +08:00
|
|
|
.name = "nfit",
|
|
|
|
.attrs = acpi_nfit_attributes,
|
2016-07-24 12:51:42 +08:00
|
|
|
.is_visible = nfit_visible,
|
2015-04-27 07:26:48 +08:00
|
|
|
};
|
|
|
|
|
2016-02-20 04:29:32 +08:00
|
|
|
static const struct attribute_group *acpi_nfit_attribute_groups[] = {
|
2015-04-27 07:26:48 +08:00
|
|
|
&acpi_nfit_attribute_group,
|
|
|
|
NULL,
|
|
|
|
};
|
|
|
|
|
2015-04-25 15:56:17 +08:00
|
|
|
static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
|
|
|
|
{
|
|
|
|
struct nvdimm *nvdimm = to_nvdimm(dev);
|
|
|
|
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
|
|
|
|
|
|
|
|
return __to_nfit_memdev(nfit_mem);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
|
|
|
|
{
|
|
|
|
struct nvdimm *nvdimm = to_nvdimm(dev);
|
|
|
|
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
|
|
|
|
|
|
|
|
return nfit_mem->dcr;
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t handle_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);
|
|
|
|
|
|
|
|
return sprintf(buf, "%#x\n", memdev->device_handle);
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RO(handle);
|
|
|
|
|
|
|
|
static ssize_t phys_id_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);
|
|
|
|
|
|
|
|
return sprintf(buf, "%#x\n", memdev->physical_id);
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RO(phys_id);
|
|
|
|
|
|
|
|
static ssize_t vendor_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
|
|
|
|
|
2016-04-26 05:34:58 +08:00
|
|
|
return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
|
2015-04-25 15:56:17 +08:00
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RO(vendor);
|
|
|
|
|
|
|
|
static ssize_t rev_id_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
|
|
|
|
|
2016-04-26 05:34:58 +08:00
|
|
|
return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
|
2015-04-25 15:56:17 +08:00
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RO(rev_id);
|
|
|
|
|
|
|
|
static ssize_t device_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
|
|
|
|
|
2016-04-26 05:34:58 +08:00
|
|
|
return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
|
2015-04-25 15:56:17 +08:00
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RO(device);
|
|
|
|
|
2016-04-30 01:33:23 +08:00
|
|
|
static ssize_t subsystem_vendor_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
|
|
|
|
|
|
|
|
return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RO(subsystem_vendor);
|
|
|
|
|
|
|
|
static ssize_t subsystem_rev_id_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
|
|
|
|
|
|
|
|
return sprintf(buf, "0x%04x\n",
|
|
|
|
be16_to_cpu(dcr->subsystem_revision_id));
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RO(subsystem_rev_id);
|
|
|
|
|
|
|
|
static ssize_t subsystem_device_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
|
|
|
|
|
|
|
|
return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RO(subsystem_device);
|
|
|
|
|
2016-04-06 06:26:50 +08:00
|
|
|
static int num_nvdimm_formats(struct nvdimm *nvdimm)
|
|
|
|
{
|
|
|
|
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
|
|
|
|
int formats = 0;
|
|
|
|
|
|
|
|
if (nfit_mem->memdev_pmem)
|
|
|
|
formats++;
|
|
|
|
if (nfit_mem->memdev_bdw)
|
|
|
|
formats++;
|
|
|
|
return formats;
|
|
|
|
}
|
|
|
|
|
2015-04-25 15:56:17 +08:00
|
|
|
static ssize_t format_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
|
|
|
|
|
2016-06-30 02:19:32 +08:00
|
|
|
return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
|
2015-04-25 15:56:17 +08:00
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RO(format);
|
|
|
|
|
2016-04-06 06:26:50 +08:00
|
|
|
static ssize_t format1_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
u32 handle;
|
|
|
|
ssize_t rc = -ENXIO;
|
|
|
|
struct nfit_mem *nfit_mem;
|
|
|
|
struct nfit_memdev *nfit_memdev;
|
|
|
|
struct acpi_nfit_desc *acpi_desc;
|
|
|
|
struct nvdimm *nvdimm = to_nvdimm(dev);
|
|
|
|
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
|
|
|
|
|
|
|
|
nfit_mem = nvdimm_provider_data(nvdimm);
|
|
|
|
acpi_desc = nfit_mem->acpi_desc;
|
|
|
|
handle = to_nfit_memdev(dev)->device_handle;
|
|
|
|
|
|
|
|
/* assumes DIMMs have at most 2 published interface codes */
|
|
|
|
mutex_lock(&acpi_desc->init_mutex);
|
|
|
|
list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
|
|
|
|
struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
|
|
|
|
struct nfit_dcr *nfit_dcr;
|
|
|
|
|
|
|
|
if (memdev->device_handle != handle)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
|
|
|
|
if (nfit_dcr->dcr->region_index != memdev->region_index)
|
|
|
|
continue;
|
|
|
|
if (nfit_dcr->dcr->code == dcr->code)
|
|
|
|
continue;
|
2016-06-30 02:19:32 +08:00
|
|
|
rc = sprintf(buf, "0x%04x\n",
|
|
|
|
le16_to_cpu(nfit_dcr->dcr->code));
|
2016-04-06 06:26:50 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
/* stop once the memdev matching this dimm has been checked */
if (rc != -ENXIO)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
mutex_unlock(&acpi_desc->init_mutex);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RO(format1);
|
|
|
|
|
|
|
|
static ssize_t formats_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct nvdimm *nvdimm = to_nvdimm(dev);
|
|
|
|
|
|
|
|
return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RO(formats);
|
|
|
|
|
2015-04-25 15:56:17 +08:00
|
|
|
static ssize_t serial_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
|
|
|
|
|
2016-04-26 05:34:58 +08:00
|
|
|
return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
|
2015-04-25 15:56:17 +08:00
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RO(serial);
|
|
|
|
|
2016-04-29 09:18:05 +08:00
|
|
|
static ssize_t family_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct nvdimm *nvdimm = to_nvdimm(dev);
|
|
|
|
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
|
|
|
|
|
|
|
|
if (nfit_mem->family < 0)
|
|
|
|
return -ENXIO;
|
|
|
|
return sprintf(buf, "%d\n", nfit_mem->family);
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RO(family);
|
|
|
|
|
|
|
|
static ssize_t dsm_mask_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct nvdimm *nvdimm = to_nvdimm(dev);
|
|
|
|
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
|
|
|
|
|
|
|
|
if (nfit_mem->family < 0)
|
|
|
|
return -ENXIO;
|
|
|
|
return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RO(dsm_mask);
|
|
|
|
|
2015-06-24 08:08:34 +08:00
|
|
|
static ssize_t flags_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
2018-09-27 01:47:15 +08:00
|
|
|
struct nvdimm *nvdimm = to_nvdimm(dev);
|
|
|
|
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
|
|
|
|
u16 flags = __to_nfit_memdev(nfit_mem)->flags;
|
|
|
|
|
|
|
|
if (test_bit(NFIT_MEM_DIRTY, &nfit_mem->flags))
|
|
|
|
flags |= ACPI_NFIT_MEM_FLUSH_FAILED;
|
2015-06-24 08:08:34 +08:00
|
|
|
|
2017-04-14 06:05:30 +08:00
|
|
|
return sprintf(buf, "%s%s%s%s%s%s%s\n",
|
nfit: Clarify memory device state flags strings
ACPI 6.0 NFIT Memory Device State Flags in Table 5-129 defines
NVDIMM status as follows. These bits convey multiple kinds of
information, such as failures, pending events, and capabilities.
Bit [0] set to 1 to indicate that the previous SAVE to the
Memory Device failed.
Bit [1] set to 1 to indicate that the last RESTORE from the
Memory Device failed.
Bit [2] set to 1 to indicate that platform flush of data to
Memory Device failed. As a result, the restored data content
may be inconsistent even if SAVE and RESTORE do not indicate
failure.
Bit [3] set to 1 to indicate that the Memory Device is observed
to be not armed prior to OSPM hand off. A Memory Device is
considered armed if it is able to accept persistent writes.
Bit [4] set to 1 to indicate that the Memory Device observed
SMART and health events prior to OSPM handoff.
/sys/bus/nd/devices/nmemX/nfit/flags shows this flags info.
The output strings associated with the bits are "save", "restore",
"smart", etc., which can be confusing as they may be interpreted
as positive status, i.e. save succeeded.
Change also the dev_info() message in acpi_nfit_register_dimms()
to be consistent with the sysfs flags strings.
Reported-by: Robert Elliott <elliott@hp.com>
Signed-off-by: Toshi Kani <toshi.kani@hp.com>
[ross: rename 'not_arm' to 'not_armed']
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
[djbw: defer adding bit5, HEALTH_ENABLED, for now]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2015-08-27 00:20:23 +08:00
|
|
|
flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
|
|
|
|
flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
|
|
|
|
flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
|
2015-10-19 10:24:52 +08:00
|
|
|
flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
|
2017-04-14 06:05:30 +08:00
|
|
|
flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "",
|
|
|
|
flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "",
|
|
|
|
flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : "");
|
2015-06-24 08:08:34 +08:00
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RO(flags);
|
|
|
|
|
2016-04-26 05:34:59 +08:00
|
|
|
static ssize_t id_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
2018-12-05 02:31:20 +08:00
|
|
|
struct nvdimm *nvdimm = to_nvdimm(dev);
|
|
|
|
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
|
2016-04-26 05:34:59 +08:00
|
|
|
|
2018-12-05 02:31:20 +08:00
|
|
|
return sprintf(buf, "%s\n", nfit_mem->id);
|
2016-04-26 05:34:59 +08:00
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RO(id);
|
|
|
|
|
2018-09-27 01:47:15 +08:00
|
|
|
static ssize_t dirty_shutdown_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct nvdimm *nvdimm = to_nvdimm(dev);
|
|
|
|
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
|
|
|
|
|
|
|
|
return sprintf(buf, "%d\n", nfit_mem->dirty_shutdown);
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RO(dirty_shutdown);
|
|
|
|
|
2015-04-25 15:56:17 +08:00
|
|
|
static struct attribute *acpi_nfit_dimm_attributes[] = {
|
|
|
|
&dev_attr_handle.attr,
|
|
|
|
&dev_attr_phys_id.attr,
|
|
|
|
&dev_attr_vendor.attr,
|
|
|
|
&dev_attr_device.attr,
|
2016-04-30 01:33:23 +08:00
|
|
|
&dev_attr_rev_id.attr,
|
|
|
|
&dev_attr_subsystem_vendor.attr,
|
|
|
|
&dev_attr_subsystem_device.attr,
|
|
|
|
&dev_attr_subsystem_rev_id.attr,
|
2015-04-25 15:56:17 +08:00
|
|
|
&dev_attr_format.attr,
|
2016-04-06 06:26:50 +08:00
|
|
|
&dev_attr_formats.attr,
|
|
|
|
&dev_attr_format1.attr,
|
2015-04-25 15:56:17 +08:00
|
|
|
&dev_attr_serial.attr,
|
2015-06-24 08:08:34 +08:00
|
|
|
&dev_attr_flags.attr,
|
2016-04-26 05:34:59 +08:00
|
|
|
&dev_attr_id.attr,
|
2016-04-29 09:18:05 +08:00
|
|
|
&dev_attr_family.attr,
|
|
|
|
&dev_attr_dsm_mask.attr,
|
2018-09-27 01:47:15 +08:00
|
|
|
&dev_attr_dirty_shutdown.attr,
|
2015-04-25 15:56:17 +08:00
|
|
|
NULL,
|
|
|
|
};
|
|
|
|
|
|
|
|
static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
|
|
|
|
struct attribute *a, int n)
|
|
|
|
{
|
|
|
|
struct device *dev = container_of(kobj, struct device, kobj);
|
2016-04-06 06:26:50 +08:00
|
|
|
struct nvdimm *nvdimm = to_nvdimm(dev);
|
2018-09-27 01:47:15 +08:00
|
|
|
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
|
2015-04-25 15:56:17 +08:00
|
|
|
|
2017-04-14 10:46:36 +08:00
|
|
|
if (!to_nfit_dcr(dev)) {
|
|
|
|
/* Without a dcr only the memdev attributes can be surfaced */
|
|
|
|
if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr
|
|
|
|
|| a == &dev_attr_flags.attr
|
|
|
|
|| a == &dev_attr_family.attr
|
|
|
|
|| a == &dev_attr_dsm_mask.attr)
|
|
|
|
return a->mode;
|
2016-04-06 06:26:50 +08:00
|
|
|
return 0;
|
2017-04-14 10:46:36 +08:00
|
|
|
}
|
|
|
|
|
2016-04-06 06:26:50 +08:00
|
|
|
if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
|
2015-04-25 15:56:17 +08:00
|
|
|
return 0;
|
2018-09-27 01:47:15 +08:00
|
|
|
|
|
|
|
if (!test_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags)
|
|
|
|
&& a == &dev_attr_dirty_shutdown.attr)
|
|
|
|
return 0;
|
|
|
|
|
2016-04-06 06:26:50 +08:00
|
|
|
return a->mode;
|
2015-04-25 15:56:17 +08:00
|
|
|
}
|
|
|
|
|
2017-06-22 18:14:41 +08:00
|
|
|
static const struct attribute_group acpi_nfit_dimm_attribute_group = {
|
2015-04-25 15:56:17 +08:00
|
|
|
.name = "nfit",
|
|
|
|
.attrs = acpi_nfit_dimm_attributes,
|
|
|
|
.is_visible = acpi_nfit_dimm_attr_visible,
|
|
|
|
};
|
|
|
|
|
|
|
|
static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
|
|
|
|
&acpi_nfit_dimm_attribute_group,
|
|
|
|
NULL,
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
|
|
|
|
u32 device_handle)
|
|
|
|
{
|
|
|
|
struct nfit_mem *nfit_mem;
|
|
|
|
|
|
|
|
list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
|
|
|
|
if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
|
|
|
|
return nfit_mem->nvdimm;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2016-08-23 10:23:25 +08:00
|
|
|
void __acpi_nvdimm_notify(struct device *dev, u32 event)
|
2016-08-23 10:28:37 +08:00
|
|
|
{
|
|
|
|
struct nfit_mem *nfit_mem;
|
|
|
|
struct acpi_nfit_desc *acpi_desc;
|
|
|
|
|
2018-03-02 20:20:49 +08:00
|
|
|
dev_dbg(dev->parent, "%s: event: %d\n", dev_name(dev),
|
2016-08-23 10:28:37 +08:00
|
|
|
event);
|
|
|
|
|
|
|
|
if (event != NFIT_NOTIFY_DIMM_HEALTH) {
|
|
|
|
dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),
|
|
|
|
event);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
acpi_desc = dev_get_drvdata(dev->parent);
|
|
|
|
if (!acpi_desc)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we successfully retrieved acpi_desc, then we know nfit_mem data
|
|
|
|
* is still valid.
|
|
|
|
*/
|
|
|
|
nfit_mem = dev_get_drvdata(dev);
|
|
|
|
if (nfit_mem && nfit_mem->flags_attr)
|
|
|
|
sysfs_notify_dirent(nfit_mem->flags_attr);
|
|
|
|
}
|
2016-08-23 10:23:25 +08:00
|
|
|
EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);
|
2016-08-23 10:28:37 +08:00
|
|
|
|
|
|
|
static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
|
|
|
|
{
|
|
|
|
struct acpi_device *adev = data;
|
|
|
|
struct device *dev = &adev->dev;
|
|
|
|
|
2019-07-18 09:08:26 +08:00
|
|
|
nfit_device_lock(dev->parent);
|
2016-08-23 10:28:37 +08:00
|
|
|
__acpi_nvdimm_notify(dev, event);
|
2019-07-18 09:08:26 +08:00
|
|
|
nfit_device_unlock(dev->parent);
|
2016-08-23 10:28:37 +08:00
|
|
|
}
|
|
|
|
|
2018-03-29 01:44:50 +08:00
|
|
|
static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
|
|
|
|
{
|
|
|
|
acpi_handle handle;
|
|
|
|
acpi_status status;
|
|
|
|
|
|
|
|
status = acpi_get_handle(adev->handle, method, &handle);
|
|
|
|
|
|
|
|
if (ACPI_SUCCESS(status))
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2018-10-18 01:47:19 +08:00
|
|
|
__weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
|
2018-09-27 01:47:15 +08:00
|
|
|
{
|
2019-01-30 14:06:41 +08:00
|
|
|
struct device *dev = &nfit_mem->adev->dev;
|
2018-09-27 01:47:15 +08:00
|
|
|
struct nd_intel_smart smart = { 0 };
|
|
|
|
union acpi_object in_buf = {
|
2019-01-30 14:06:41 +08:00
|
|
|
.buffer.type = ACPI_TYPE_BUFFER,
|
|
|
|
.buffer.length = 0,
|
2018-09-27 01:47:15 +08:00
|
|
|
};
|
|
|
|
union acpi_object in_obj = {
|
2019-01-30 14:06:41 +08:00
|
|
|
.package.type = ACPI_TYPE_PACKAGE,
|
2018-09-27 01:47:15 +08:00
|
|
|
.package.count = 1,
|
|
|
|
.package.elements = &in_buf,
|
|
|
|
};
|
|
|
|
const u8 func = ND_INTEL_SMART;
|
|
|
|
const guid_t *guid = to_nfit_uuid(nfit_mem->family);
|
|
|
|
u8 revid = nfit_dsm_revid(nfit_mem->family, func);
|
|
|
|
struct acpi_device *adev = nfit_mem->adev;
|
|
|
|
acpi_handle handle = adev->handle;
|
|
|
|
union acpi_object *out_obj;
|
|
|
|
|
|
|
|
if ((nfit_mem->dsm_mask & (1 << func)) == 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
|
2019-01-30 14:06:41 +08:00
|
|
|
if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER
|
|
|
|
|| out_obj->buffer.length < sizeof(smart)) {
|
|
|
|
dev_dbg(dev->parent, "%s: failed to retrieve initial health\n",
|
|
|
|
dev_name(dev));
|
|
|
|
ACPI_FREE(out_obj);
|
2018-09-27 01:47:15 +08:00
|
|
|
return;
|
2019-01-30 14:06:41 +08:00
|
|
|
}
|
|
|
|
memcpy(&smart, out_obj->buffer.pointer, sizeof(smart));
|
|
|
|
ACPI_FREE(out_obj);
|
2018-09-27 01:47:15 +08:00
|
|
|
|
|
|
|
if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) {
|
|
|
|
if (smart.shutdown_state)
|
|
|
|
set_bit(NFIT_MEM_DIRTY, &nfit_mem->flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (smart.flags & ND_INTEL_SMART_SHUTDOWN_COUNT_VALID) {
|
|
|
|
set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags);
|
|
|
|
nfit_mem->dirty_shutdown = smart.shutdown_count;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void populate_shutdown_status(struct nfit_mem *nfit_mem)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* For DIMMs that provide a dynamic facility to retrieve a
|
|
|
|
* dirty-shutdown status and/or a dirty-shutdown count, cache
|
|
|
|
* these values in nfit_mem.
|
|
|
|
*/
|
|
|
|
if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
|
|
|
|
nfit_intel_shutdown_status(nfit_mem);
|
|
|
|
}
|
|
|
|
|
2015-06-09 02:27:06 +08:00
|
|
|
static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
|
|
|
|
struct nfit_mem *nfit_mem, u32 device_handle)
|
|
|
|
{
|
2020-07-21 06:07:30 +08:00
|
|
|
struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
|
2015-06-09 02:27:06 +08:00
|
|
|
struct acpi_device *adev, *adev_dimm;
|
|
|
|
struct device *dev = acpi_desc->dev;
|
2018-06-14 00:06:52 +08:00
|
|
|
unsigned long dsm_mask, label_mask;
|
2017-06-06 00:40:42 +08:00
|
|
|
const guid_t *guid;
|
2015-07-23 04:17:22 +08:00
|
|
|
int i;
|
2017-03-08 05:35:13 +08:00
|
|
|
int family = -1;
|
2018-12-05 02:31:20 +08:00
|
|
|
struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
|
2015-06-09 02:27:06 +08:00
|
|
|
|
2016-04-29 07:17:07 +08:00
|
|
|
/* nfit test assumes 1:1 relationship between commands and dsms */
|
|
|
|
nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
|
2016-04-29 07:23:43 +08:00
|
|
|
nfit_mem->family = NVDIMM_FAMILY_INTEL;
|
2020-07-21 06:07:30 +08:00
|
|
|
set_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask);
|
2018-12-05 02:31:20 +08:00
|
|
|
|
|
|
|
if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
|
|
|
|
sprintf(nfit_mem->id, "%04x-%02x-%04x-%08x",
|
|
|
|
be16_to_cpu(dcr->vendor_id),
|
|
|
|
dcr->manufacturing_location,
|
|
|
|
be16_to_cpu(dcr->manufacturing_date),
|
|
|
|
be32_to_cpu(dcr->serial_number));
|
|
|
|
else
|
|
|
|
sprintf(nfit_mem->id, "%04x-%08x",
|
|
|
|
be16_to_cpu(dcr->vendor_id),
|
|
|
|
be32_to_cpu(dcr->serial_number));
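		/*
		 * Example id (values hypothetical): vendor 0x8089,
		 * location 0x01, date 0x2016, serial 0x12345678 yields
		 * "8089-01-2016-12345678"; the short form omits the
		 * manufacturing fields.
		 */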
|
|
|
|
|
2015-06-09 02:27:06 +08:00
|
|
|
adev = to_acpi_dev(acpi_desc);
|
2018-10-18 01:47:19 +08:00
|
|
|
if (!adev) {
|
|
|
|
/* unit test case */
|
|
|
|
populate_shutdown_status(nfit_mem);
|
2015-06-09 02:27:06 +08:00
|
|
|
return 0;
|
2018-10-18 01:47:19 +08:00
|
|
|
}
|
2015-06-09 02:27:06 +08:00
|
|
|
|
|
|
|
adev_dimm = acpi_find_child_device(adev, device_handle, false);
|
|
|
|
nfit_mem->adev = adev_dimm;
|
|
|
|
if (!adev_dimm) {
|
|
|
|
dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
|
|
|
|
device_handle);
|
2015-06-01 02:41:48 +08:00
|
|
|
return force_enable_dimms ? 0 : -ENODEV;
|
2015-06-09 02:27:06 +08:00
|
|
|
}
|
|
|
|
|
2016-08-23 10:28:37 +08:00
|
|
|
if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
|
|
|
|
ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
|
|
|
|
dev_err(dev, "%s: notification registration failed\n",
|
|
|
|
dev_name(&adev_dimm->dev));
|
|
|
|
return -ENXIO;
|
|
|
|
}
|
2017-12-01 11:42:52 +08:00
|
|
|
/*
|
|
|
|
* Record nfit_mem for the notification path to track back to
|
|
|
|
* the nfit sysfs attributes for this dimm device object.
|
|
|
|
*/
|
|
|
|
dev_set_drvdata(&adev_dimm->dev, nfit_mem);
|
2016-08-23 10:28:37 +08:00
|
|
|
|
2016-04-29 07:23:43 +08:00
|
|
|
/*
|
2019-01-29 08:56:17 +08:00
|
|
|
* There are 4 "legacy" NVDIMM command sets
|
|
|
|
* (NVDIMM_FAMILY_{INTEL,MSFT,HPE1,HPE2}) that were created before
|
|
|
|
* an EFI working group was established to constrain this
|
|
|
|
* proliferation. The nfit driver probes for the supported command
|
|
|
|
* set by GUID. Note, if you're a platform developer looking to add
|
|
|
|
* a new command set to this probe, consider using an existing set,
|
|
|
|
* or otherwise seek approval to publish the command set at
|
|
|
|
* http://www.uefi.org/RFIC_LIST.
|
|
|
|
*
|
|
|
|
* Note, that checking for function0 (bit0) tells us if any commands
|
|
|
|
* are reachable through this GUID.
|
2016-04-29 07:23:43 +08:00
|
|
|
*/
|
2020-07-21 06:07:30 +08:00
|
|
|
clear_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask);
|
2017-10-21 06:39:43 +08:00
|
|
|
for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
|
2020-07-21 06:07:30 +08:00
|
|
|
if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) {
|
|
|
|
set_bit(i, &nd_desc->dimm_family_mask);
|
2017-03-08 05:35:13 +08:00
|
|
|
if (family < 0 || i == default_dsm_family)
|
|
|
|
family = i;
|
2020-07-21 06:07:30 +08:00
|
|
|
}
|
2016-04-29 07:23:43 +08:00
|
|
|
|
|
|
|
/* limit the supported commands to those that are publicly documented */
|
2017-03-08 05:35:13 +08:00
|
|
|
nfit_mem->family = family;
|
2017-03-08 05:35:12 +08:00
|
|
|
if (override_dsm_mask && !disable_vendor_specific)
|
|
|
|
dsm_mask = override_dsm_mask;
|
|
|
|
else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
|
2017-10-21 06:39:43 +08:00
|
|
|
dsm_mask = NVDIMM_INTEL_CMDMASK;
|
2016-04-29 09:01:20 +08:00
|
|
|
if (disable_vendor_specific)
|
|
|
|
dsm_mask &= ~(1 << ND_CMD_VENDOR);
|
2016-05-27 00:38:41 +08:00
|
|
|
} else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
|
2016-04-29 07:23:43 +08:00
|
|
|
dsm_mask = 0x1c3c76;
|
2016-05-27 00:38:41 +08:00
|
|
|
} else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
|
2016-04-29 07:23:43 +08:00
|
|
|
dsm_mask = 0x1fe;
|
2016-04-29 09:01:20 +08:00
|
|
|
if (disable_vendor_specific)
|
|
|
|
dsm_mask &= ~(1 << 8);
|
2016-05-27 00:38:41 +08:00
|
|
|
} else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
|
|
|
|
dsm_mask = 0xffffffff;
|
2019-01-29 08:56:17 +08:00
|
|
|
} else if (nfit_mem->family == NVDIMM_FAMILY_HYPERV) {
|
|
|
|
dsm_mask = 0x1f;
|
2016-04-29 09:01:20 +08:00
|
|
|
} else {
|
2016-07-20 03:32:39 +08:00
|
|
|
dev_dbg(dev, "unknown dimm command family\n");
|
2016-04-29 07:23:43 +08:00
|
|
|
nfit_mem->family = -1;
|
2016-07-20 03:32:39 +08:00
|
|
|
/* DSMs are optional, continue loading the driver... */
|
|
|
|
return 0;
|
2016-04-29 07:23:43 +08:00
|
|
|
}
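	/*
	 * Note: each per-family dsm_mask above is a bitmap of DSM function
	 * indices published for that vendor family, bit N enabling function
	 * N (e.g. 0x1fe covers functions 1-8 for NVDIMM_FAMILY_HPE2).
	 */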
|
|
|
|
|
2019-01-15 06:07:19 +08:00
|
|
|
/*
|
|
|
|
* Function 0 is the command interrogation function, don't
|
|
|
|
* export it to potential userspace use, and enable it to be
|
|
|
|
* used as an error value in acpi_nfit_ctl().
|
|
|
|
*/
|
|
|
|
dsm_mask &= ~1UL;
|
|
|
|
|
2017-06-06 00:40:42 +08:00
|
|
|
guid = to_nfit_uuid(nfit_mem->family);
|
2016-04-29 07:23:43 +08:00
|
|
|
for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
|
2017-10-21 06:39:43 +08:00
|
|
|
if (acpi_check_dsm(adev_dimm->handle, guid,
|
|
|
|
nfit_dsm_revid(nfit_mem->family, i),
|
|
|
|
1ULL << i))
|
2015-06-09 02:27:06 +08:00
|
|
|
set_bit(i, &nfit_mem->dsm_mask);
|
|
|
|
|
2018-06-14 00:06:52 +08:00
|
|
|
/*
|
|
|
|
* Prefer the NVDIMM_FAMILY_INTEL label read commands if present
|
|
|
|
* due to their better semantics handling locked capacity.
|
|
|
|
*/
|
|
|
|
label_mask = 1 << ND_CMD_GET_CONFIG_SIZE | 1 << ND_CMD_GET_CONFIG_DATA
|
|
|
|
| 1 << ND_CMD_SET_CONFIG_DATA;
|
|
|
|
if (family == NVDIMM_FAMILY_INTEL
|
|
|
|
&& (dsm_mask & label_mask) == label_mask)
|
2019-01-30 14:06:41 +08:00
|
|
|
/* skip _LS{I,R,W} enabling */;
|
|
|
|
else {
|
|
|
|
if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
|
|
|
|
&& acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
|
|
|
|
dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
|
|
|
|
set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
|
|
|
|
}
|
2018-06-14 00:06:52 +08:00
|
|
|
|
2019-01-30 14:06:41 +08:00
|
|
|
if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
|
|
|
|
&& acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
|
|
|
|
dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
|
|
|
|
set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
|
|
|
|
}
|
2017-09-25 00:57:34 +08:00
|
|
|
|
2019-02-04 03:17:27 +08:00
|
|
|
/*
|
|
|
|
* Quirk read-only label configurations to preserve
|
|
|
|
* access to label-less namespaces by default.
|
|
|
|
*/
|
|
|
|
if (!test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
|
|
|
|
&& !force_labels) {
|
|
|
|
dev_dbg(dev, "%s: No _LSW, disable labels\n",
|
|
|
|
dev_name(&adev_dimm->dev));
|
|
|
|
clear_bit(NFIT_MEM_LSR, &nfit_mem->flags);
|
|
|
|
} else
|
|
|
|
dev_dbg(dev, "%s: Force enable labels\n",
|
|
|
|
dev_name(&adev_dimm->dev));
|
2017-09-25 00:57:34 +08:00
|
|
|
}
|
|
|
|
|
2018-09-27 01:47:15 +08:00
|
|
|
populate_shutdown_status(nfit_mem);
|
|
|
|
|
2015-07-23 04:17:22 +08:00
|
|
|
return 0;
|
2015-06-09 02:27:06 +08:00
|
|
|
}
|
|
|
|
|
2016-08-23 10:28:37 +08:00
|
|
|
static void shutdown_dimm_notify(void *data)
|
|
|
|
{
|
|
|
|
struct acpi_nfit_desc *acpi_desc = data;
|
|
|
|
struct nfit_mem *nfit_mem;
|
|
|
|
|
|
|
|
mutex_lock(&acpi_desc->init_mutex);
|
|
|
|
/*
|
|
|
|
* Clear out the nfit_mem->flags_attr and shut down dimm event
|
|
|
|
* notifications.
|
|
|
|
*/
|
|
|
|
list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
|
2016-08-23 10:23:25 +08:00
|
|
|
struct acpi_device *adev_dimm = nfit_mem->adev;
|
|
|
|
|
2016-08-23 10:28:37 +08:00
|
|
|
if (nfit_mem->flags_attr) {
|
|
|
|
sysfs_put(nfit_mem->flags_attr);
|
|
|
|
nfit_mem->flags_attr = NULL;
|
|
|
|
}
|
2017-12-01 11:42:52 +08:00
|
|
|
if (adev_dimm) {
|
2016-08-23 10:23:25 +08:00
|
|
|
acpi_remove_notify_handler(adev_dimm->handle,
|
|
|
|
ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
|
2017-12-01 11:42:52 +08:00
|
|
|
dev_set_drvdata(&adev_dimm->dev, NULL);
|
|
|
|
}
|
2016-08-23 10:28:37 +08:00
|
|
|
}
|
|
|
|
mutex_unlock(&acpi_desc->init_mutex);
|
|
|
|
}
|
|
|
|
|
2018-12-06 15:39:29 +08:00
|
|
|
static const struct nvdimm_security_ops *acpi_nfit_get_security_ops(int family)
|
|
|
|
{
|
|
|
|
switch (family) {
|
|
|
|
case NVDIMM_FAMILY_INTEL:
|
|
|
|
return intel_security_ops;
|
|
|
|
default:
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-21 06:08:24 +08:00
|
|
|
static const struct nvdimm_fw_ops *acpi_nfit_get_fw_ops(
|
|
|
|
struct nfit_mem *nfit_mem)
|
|
|
|
{
|
|
|
|
unsigned long mask;
|
|
|
|
struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
|
|
|
|
struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
|
|
|
|
|
|
|
|
if (!nd_desc->fw_ops)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
if (nfit_mem->family != NVDIMM_FAMILY_INTEL)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
mask = nfit_mem->dsm_mask & NVDIMM_INTEL_FW_ACTIVATE_CMDMASK;
|
|
|
|
if (mask != NVDIMM_INTEL_FW_ACTIVATE_CMDMASK)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return intel_fw_ops;
|
|
|
|
}
|
|
|
|
|
2015-04-25 15:56:17 +08:00
|
|
|
static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
|
|
|
|
{
|
|
|
|
struct nfit_mem *nfit_mem;
|
2016-08-23 10:28:37 +08:00
|
|
|
int dimm_count = 0, rc;
|
|
|
|
struct nvdimm *nvdimm;
|
2015-04-25 15:56:17 +08:00
|
|
|
|
|
|
|
list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
|
2016-06-08 08:00:04 +08:00
|
|
|
struct acpi_nfit_flush_address *flush;
|
2016-04-29 07:23:43 +08:00
|
|
|
unsigned long flags = 0, cmd_mask;
|
2017-04-15 01:27:11 +08:00
|
|
|
struct nfit_memdev *nfit_memdev;
|
2015-04-25 15:56:17 +08:00
|
|
|
u32 device_handle;
|
2015-06-24 08:08:34 +08:00
|
|
|
u16 mem_flags;
|
2015-04-25 15:56:17 +08:00
|
|
|
|
|
|
|
device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
|
|
|
|
nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
|
|
|
|
if (nvdimm) {
|
2015-10-28 06:58:27 +08:00
|
|
|
dimm_count++;
|
2015-04-25 15:56:17 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2020-01-31 04:06:18 +08:00
|
|
|
if (nfit_mem->bdw && nfit_mem->memdev_pmem) {
|
2017-05-05 05:01:24 +08:00
|
|
|
set_bit(NDD_ALIASING, &flags);
|
2020-01-31 04:06:18 +08:00
|
|
|
set_bit(NDD_LABELING, &flags);
|
|
|
|
}
|
2015-04-25 15:56:17 +08:00
|
|
|
|
2017-04-15 01:27:11 +08:00
|
|
|
/* collate flags across all memdevs for this dimm */
|
|
|
|
list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
|
|
|
|
struct acpi_nfit_memory_map *dimm_memdev;
|
|
|
|
|
|
|
|
dimm_memdev = __to_nfit_memdev(nfit_mem);
|
|
|
|
if (dimm_memdev->device_handle
|
|
|
|
!= nfit_memdev->memdev->device_handle)
|
|
|
|
continue;
|
|
|
|
dimm_memdev->flags |= nfit_memdev->memdev->flags;
|
|
|
|
}
|
|
|
|
|
2015-06-24 08:08:34 +08:00
|
|
|
mem_flags = __to_nfit_memdev(nfit_mem)->flags;
|
2015-10-19 10:24:52 +08:00
|
|
|
if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
|
2017-05-05 05:01:24 +08:00
|
|
|
set_bit(NDD_UNARMED, &flags);
|
2015-06-24 08:08:34 +08:00
|
|
|
|
2015-06-09 02:27:06 +08:00
|
|
|
rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
|
|
|
|
if (rc)
|
|
|
|
continue;
|
|
|
|
|
2016-04-29 07:17:07 +08:00
|
|
|
/*
|
2016-04-29 07:23:43 +08:00
|
|
|
* TODO: provide translation for non-NVDIMM_FAMILY_INTEL
|
|
|
|
* devices (i.e. from nd_cmd to acpi_dsm) to standardize the
|
|
|
|
* userspace interface.
|
2016-04-29 07:17:07 +08:00
|
|
|
*/
|
2016-04-29 07:23:43 +08:00
|
|
|
cmd_mask = 1UL << ND_CMD_CALL;
|
2017-10-30 03:13:07 +08:00
|
|
|
if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
|
|
|
|
/*
|
|
|
|
* These commands have a 1:1 correspondence
|
|
|
|
* between DSM payload and libnvdimm ioctl
|
|
|
|
* payload format.
|
|
|
|
*/
|
|
|
|
cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
|
|
|
|
}
|
2016-04-29 07:23:43 +08:00
|
|
|
|
2019-02-03 08:35:26 +08:00
|
|
|
/* Quirk to ignore LOCAL for labels on HYPERV DIMMs */
|
|
|
|
if (nfit_mem->family == NVDIMM_FAMILY_HYPERV)
|
|
|
|
set_bit(NDD_NOBLK, &flags);
|
|
|
|
|
2018-09-27 01:48:38 +08:00
|
|
|
if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
|
2017-09-25 00:57:34 +08:00
|
|
|
set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
|
|
|
|
set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
|
2018-03-29 01:44:50 +08:00
|
|
|
}
|
2018-09-27 01:48:38 +08:00
|
|
|
if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags))
|
2017-09-25 00:57:34 +08:00
|
|
|
set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
|
|
|
|
|
2016-06-08 08:00:04 +08:00
|
|
|
flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
|
|
|
|
: NULL;
|
2018-12-05 02:31:20 +08:00
|
|
|
nvdimm = __nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
|
2015-06-09 02:27:06 +08:00
|
|
|
acpi_nfit_dimm_attribute_groups,
|
2016-06-08 08:00:04 +08:00
|
|
|
flags, cmd_mask, flush ? flush->hint_count : 0,
|
2018-12-06 15:39:29 +08:00
|
|
|
nfit_mem->flush_wpq, &nfit_mem->id[0],
|
2020-07-21 06:08:24 +08:00
|
|
|
acpi_nfit_get_security_ops(nfit_mem->family),
|
|
|
|
acpi_nfit_get_fw_ops(nfit_mem));
|
2015-04-25 15:56:17 +08:00
|
|
|
if (!nvdimm)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
nfit_mem->nvdimm = nvdimm;
|
2015-06-01 02:41:48 +08:00
|
|
|
dimm_count++;
|
2015-06-24 08:08:34 +08:00
|
|
|
|
|
|
|
if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
|
|
|
|
continue;
|
|
|
|
|
2019-03-01 04:12:18 +08:00
|
|
|
dev_err(acpi_desc->dev, "Error found in NVDIMM %s flags:%s%s%s%s%s\n",
|
2015-06-24 08:08:34 +08:00
|
|
|
nvdimm_name(nvdimm),
|
nfit: Clarify memory device state flags strings
2015-08-27 00:20:23 +08:00
|
|
|
mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
|
|
|
|
mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
|
|
|
|
mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
|
2017-04-14 10:46:36 +08:00
|
|
|
mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "",
|
|
|
|
mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : "");
|
2015-06-24 08:08:34 +08:00
|
|
|
|
2015-04-25 15:56:17 +08:00
|
|
|
}
|
|
|
|
|
2016-08-23 10:28:37 +08:00
|
|
|
rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Now that dimms are successfully registered, and async registration
|
|
|
|
* is flushed, attempt to enable event notification.
|
|
|
|
*/
|
|
|
|
list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
|
|
|
|
struct kernfs_node *nfit_kernfs;
|
|
|
|
|
|
|
|
nvdimm = nfit_mem->nvdimm;
|
2018-02-03 05:00:36 +08:00
|
|
|
if (!nvdimm)
|
|
|
|
continue;
|
|
|
|
|
2016-08-23 10:28:37 +08:00
|
|
|
nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
|
|
|
|
if (nfit_kernfs)
|
|
|
|
nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
|
|
|
|
"flags");
|
|
|
|
sysfs_put(nfit_kernfs);
|
|
|
|
if (!nfit_mem->flags_attr)
|
|
|
|
dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
|
|
|
|
nvdimm_name(nvdimm));
|
|
|
|
}
|
|
|
|
|
|
|
|
return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
|
|
|
|
acpi_desc);
|
2015-04-25 15:56:17 +08:00
|
|
|
}
|
|
|
|
|
2017-07-01 11:53:24 +08:00
|
|
|
/*
|
|
|
|
* These constants are private because there are no kernel consumers of
|
|
|
|
* these commands.
|
|
|
|
*/
|
|
|
|
enum nfit_aux_cmds {
|
|
|
|
NFIT_CMD_TRANSLATE_SPA = 5,
|
|
|
|
NFIT_CMD_ARS_INJECT_SET = 7,
|
|
|
|
NFIT_CMD_ARS_INJECT_CLEAR = 8,
|
|
|
|
NFIT_CMD_ARS_INJECT_GET = 9,
|
|
|
|
};
|
|
|
|
|
2015-06-09 02:27:06 +08:00
|
|
|
static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
|
|
|
|
{
|
|
|
|
struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
|
2017-06-06 00:40:42 +08:00
|
|
|
const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
|
2020-07-21 06:07:40 +08:00
|
|
|
unsigned long dsm_mask, *mask;
|
2015-06-09 02:27:06 +08:00
|
|
|
struct acpi_device *adev;
|
|
|
|
int i;
|
|
|
|
|
2020-07-21 06:07:30 +08:00
|
|
|
set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);
|
|
|
|
set_bit(NVDIMM_BUS_FAMILY_NFIT, &nd_desc->bus_family_mask);
|
|
|
|
|
2020-07-21 06:07:40 +08:00
|
|
|
/* enable nfit_test to inject bus command emulation */
|
|
|
|
if (acpi_desc->bus_cmd_force_en) {
|
|
|
|
nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
|
|
|
|
mask = &nd_desc->bus_family_mask;
|
2020-07-21 06:08:24 +08:00
|
|
|
if (acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]) {
|
2020-07-21 06:07:40 +08:00
|
|
|
set_bit(NVDIMM_BUS_FAMILY_INTEL, mask);
|
2020-07-21 06:08:24 +08:00
|
|
|
nd_desc->fw_ops = intel_bus_fw_ops;
|
|
|
|
}
|
2020-07-21 06:07:40 +08:00
|
|
|
}
|
|
|
|
|
2015-06-09 02:27:06 +08:00
|
|
|
adev = to_acpi_dev(acpi_desc);
|
|
|
|
if (!adev)
|
|
|
|
return;
|
|
|
|
|
2016-03-04 08:08:54 +08:00
|
|
|
for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
|
2017-06-06 00:40:46 +08:00
|
|
|
if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
|
2016-04-29 07:17:07 +08:00
|
|
|
set_bit(i, &nd_desc->cmd_mask);
|
2017-07-01 11:53:24 +08:00
|
|
|
|
|
|
|
dsm_mask =
|
|
|
|
(1 << ND_CMD_ARS_CAP) |
|
|
|
|
(1 << ND_CMD_ARS_START) |
|
|
|
|
(1 << ND_CMD_ARS_STATUS) |
|
|
|
|
(1 << ND_CMD_CLEAR_ERROR) |
|
|
|
|
(1 << NFIT_CMD_TRANSLATE_SPA) |
|
|
|
|
(1 << NFIT_CMD_ARS_INJECT_SET) |
|
|
|
|
(1 << NFIT_CMD_ARS_INJECT_CLEAR) |
|
|
|
|
(1 << NFIT_CMD_ARS_INJECT_GET);
|
|
|
|
for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
|
|
|
|
if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
|
2020-07-21 06:07:35 +08:00
|
|
|
set_bit(i, &acpi_desc->bus_dsm_mask);
|
2020-07-21 06:07:40 +08:00
|
|
|
|
|
|
|
/* Enumerate allowed NVDIMM_BUS_FAMILY_INTEL commands */
|
|
|
|
dsm_mask = NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK;
|
|
|
|
guid = to_nfit_bus_uuid(NVDIMM_BUS_FAMILY_INTEL);
|
|
|
|
mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
|
|
|
|
for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
|
|
|
|
if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
|
|
|
|
set_bit(i, mask);
|
2020-07-21 06:08:24 +08:00
|
|
|
|
|
|
|
if (*mask == dsm_mask) {
|
|
|
|
set_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask);
|
|
|
|
nd_desc->fw_ops = intel_bus_fw_ops;
|
|
|
|
}
|
2015-06-09 02:27:06 +08:00
|
|
|
}
|
|
|
|
|
libnvdimm, nfit: regions (block-data-window, persistent memory, volatile memory)
A "region" device represents the maximum capacity of a BLK range (mmio
block-data-window(s)), or a PMEM range (DAX-capable persistent memory or
volatile memory), without regard for aliasing. Aliasing, in the
dimm-local address space (DPA), is resolved by metadata on a dimm to
designate which exclusive interface will access the aliased DPA ranges.
Support for the per-dimm metadata/label arrives in a subsequent
patch.
The name format of "region" devices is "regionN" where, like dimms, N is
a global ida index assigned at discovery time. This id is not reliable
across reboots nor in the presence of hotplug. Look to attributes of
the region or static id-data of the sub-namespace to generate a
persistent name. However, if the platform configuration does not change
it is reasonable to expect the same region id to be assigned at the next
boot.
"region"s have 2 generic attributes "size", and "mapping"s where:
- size: the BLK accessible capacity or the span of the
system physical address range in the case of PMEM.
- mappingN: a tuple describing a dimm's contribution to the region's
capacity in the format (<nmemX>,<dpa>,<size>). For a PMEM-region
there will be at least one mapping per dimm in the interleave set. For
a BLK-region there is only "mapping0" listing the starting DPA of the
BLK-region and the available DPA capacity of that space (matches "size"
above).
The max number of mappings per "region" is hard coded per the
constraints of sysfs attribute groups. That said the number of mappings
per region should never exceed the maximum number of possible dimms in
the system. If the current number turns out to not be enough then the
"mappings" attribute clarifies how many there are supposed to be. "32
should be enough for anybody...".
Cc: Neil Brown <neilb@suse.de>
Cc: <linux-acpi@vger.kernel.org>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Robert Moore <robert.moore@intel.com>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Toshi Kani <toshi.kani@hp.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2015-06-10 08:13:14 +08:00
|
|
|
static ssize_t range_index_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct nd_region *nd_region = to_nd_region(dev);
|
|
|
|
struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);
|
|
|
|
|
|
|
|
return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RO(range_index);
|
|
|
|
|
|
|
|
static struct attribute *acpi_nfit_region_attributes[] = {
|
|
|
|
&dev_attr_range_index.attr,
|
|
|
|
NULL,
|
|
|
|
};
|
|
|
|
|
2017-06-22 18:14:41 +08:00
|
|
|
static const struct attribute_group acpi_nfit_region_attribute_group = {
|
libnvdimm, nfit: regions (block-data-window, persistent memory, volatile memory)
A "region" device represents the maximum capacity of a BLK range (mmio
block-data-window(s)), or a PMEM range (DAX-capable persistent memory or
volatile memory), without regard for aliasing. Aliasing, in the
dimm-local address space (DPA), is resolved by metadata on a dimm to
designate which exclusive interface will access the aliased DPA ranges.
Support for the per-dimm metadata/label arrvies is in a subsequent
patch.
The name format of "region" devices is "regionN" where, like dimms, N is
a global ida index assigned at discovery time. This id is not reliable
across reboots nor in the presence of hotplug. Look to attributes of
the region or static id-data of the sub-namespace to generate a
persistent name. However, if the platform configuration does not change
it is reasonable to expect the same region id to be assigned at the next
boot.
"region"s have 2 generic attributes "size", and "mapping"s where:
- size: the BLK accessible capacity or the span of the
system physical address range in the case of PMEM.
- mappingN: a tuple describing a dimm's contribution to the region's
capacity in the format (<nmemX>,<dpa>,<size>). For a PMEM-region
there will be at least one mapping per dimm in the interleave set. For
a BLK-region there is only "mapping0" listing the starting DPA of the
BLK-region and the available DPA capacity of that space (matches "size"
above).
The max number of mappings per "region" is hard coded per the
constraints of sysfs attribute groups. That said the number of mappings
per region should never exceed the maximum number of possible dimms in
the system. If the current number turns out to not be enough then the
"mappings" attribute clarifies how many there are supposed to be. "32
should be enough for anybody...".
Cc: Neil Brown <neilb@suse.de>
Cc: <linux-acpi@vger.kernel.org>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Robert Moore <robert.moore@intel.com>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Toshi Kani <toshi.kani@hp.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2015-06-10 08:13:14 +08:00
|
|
|
.name = "nfit",
|
|
|
|
.attrs = acpi_nfit_region_attributes,
|
|
|
|
};
|
|
|
|
|
|
|
|
static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
|
|
|
|
&acpi_nfit_region_attribute_group,
|
|
|
|
NULL,
|
|
|
|
};
|
|
|
|
|
2015-05-02 01:11:27 +08:00
|
|
|
/* enough info to uniquely specify an interleave set */
|
|
|
|
struct nfit_set_info {
|
|
|
|
struct nfit_set_info_map {
|
|
|
|
u64 region_offset;
|
|
|
|
u32 serial_number;
|
|
|
|
u32 pad;
|
|
|
|
} mapping[0];
|
|
|
|
};
|
|
|
|
|
2017-06-04 09:59:15 +08:00
|
|
|
struct nfit_set_info2 {
|
|
|
|
struct nfit_set_info_map2 {
|
|
|
|
u64 region_offset;
|
|
|
|
u32 serial_number;
|
|
|
|
u16 vendor_id;
|
|
|
|
u16 manufacturing_date;
|
|
|
|
u8 manufacturing_location;
|
|
|
|
u8 reserved[31];
|
|
|
|
} mapping[0];
|
|
|
|
};
|
|
|
|
|
2015-05-02 01:11:27 +08:00
|
|
|
static size_t sizeof_nfit_set_info(int num_mappings)
|
|
|
|
{
|
|
|
|
return sizeof(struct nfit_set_info)
|
|
|
|
+ num_mappings * sizeof(struct nfit_set_info_map);
|
|
|
|
}
|
|
|
|
|
2017-06-04 09:59:15 +08:00
|
|
|
static size_t sizeof_nfit_set_info2(int num_mappings)
|
|
|
|
{
|
|
|
|
return sizeof(struct nfit_set_info2)
|
|
|
|
+ num_mappings * sizeof(struct nfit_set_info_map2);
|
|
|
|
}
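On kernels that provide struct_size() in <linux/overflow.h>, these open-coded flexible-array sizes could instead be computed with overflow checking at the allocation site; a sketch, not what this version of the driver does:

	info = devm_kzalloc(dev, struct_size(info, mapping, nr), GFP_KERNEL);
	info2 = devm_kzalloc(dev, struct_size(info2, mapping, nr), GFP_KERNEL);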
|
|
|
|
|

static int cmp_map_compat(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	if (map0->region_offset < map1->region_offset)
		return -1;
	else if (map0->region_offset > map1->region_offset)
		return 1;
	return 0;
}

static int cmp_map2(const void *m0, const void *m1)
{
	const struct nfit_set_info_map2 *map0 = m0;
	const struct nfit_set_info_map2 *map1 = m1;

	if (map0->region_offset < map1->region_offset)
		return -1;
	else if (map0->region_offset > map1->region_offset)
		return 1;
	return 0;
}
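
Why both comparators exist: the original v1.1 cookies were computed
after a memcmp()-based sort of region_offset, which is a byte-wise
order, not a numeric one on little-endian CPUs. A small userspace
demonstration with hypothetical values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t a = 0x100, b = 0x2;	/* hypothetical region offsets */

	/* Numerically a > b, but a little-endian memcmp() sees a's
	 * first byte as 0x00 and b's as 0x02, so the byte-wise sort
	 * puts a first -- hence the separate "compat" cookie flavor.
	 */
	printf("numeric: a %s b\n", a < b ? "<" : ">");
	printf("memcmp:  a %s b\n",
			memcmp(&a, &b, sizeof(a)) < 0 ? "<" : ">");
	return 0;
}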

/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}

static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info2 *info2;
	struct nfit_set_info *info;
	int i;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;
	import_guid(&nd_set->type_guid, spa->range_guid);

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL);
	if (!info2)
		return -ENOMEM;

	for (i = 0; i < nr; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nfit_set_info_map2 *map2 = &info2->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);
		struct acpi_nfit_control_region *dcr = nfit_mem->dcr;

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = dcr->serial_number;

		map2->region_offset = memdev->region_offset;
		map2->serial_number = dcr->serial_number;
		map2->vendor_id = dcr->vendor_id;
		map2->manufacturing_date = dcr->manufacturing_date;
		map2->manufacturing_location = dcr->manufacturing_location;
	}

	/* v1.1 namespaces */
	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);

	/* v1.2 namespaces */
	sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2),
			cmp_map2, NULL);
	nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0);

	/* support v1.1 namespaces created with the wrong sort order */
	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map_compat, NULL);
	nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);

	/* record the result of the sort for the mapping position */
	for (i = 0; i < nr; i++) {
		struct nfit_set_info_map2 *map2 = &info2->mapping[i];
		int j;

		for (j = 0; j < nr; j++) {
			struct nd_mapping_desc *mapping = &ndr_desc->mapping[j];
			struct nvdimm *nvdimm = mapping->nvdimm;
			struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
			struct acpi_nfit_control_region *dcr = nfit_mem->dcr;

			if (map2->serial_number == dcr->serial_number &&
			    map2->vendor_id == dcr->vendor_id &&
			    map2->manufacturing_date == dcr->manufacturing_date &&
			    map2->manufacturing_location
					== dcr->manufacturing_location) {
				mapping->position = i;
				break;
			}
		}
	}

	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);
	devm_kfree(dev, info2);

	return 0;
}
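
nd_fletcher64() is provided by the libnvdimm core; roughly, it runs a
Fletcher-64 over the 32-bit words of the sorted mapping array. A
userspace sketch under that assumption (the in-kernel helper also takes
a flag controlling endian conversion of the input words, elided here):

#include <stddef.h>
#include <stdint.h>

static uint64_t fletcher64(const void *addr, size_t len)
{
	const uint32_t *buf = addr;
	uint32_t lo32 = 0;
	uint64_t hi32_lo32 = 0;
	size_t i;

	for (i = 0; i < len / sizeof(uint32_t); i++) {
		lo32 += buf[i];		/* low running sum, mod 2^32 */
		hi32_lo32 += lo32;	/* sum of the running sums */
	}
	return hi32_lo32;
}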

static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}
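
To make the line/table arithmetic concrete, here is a self-contained
userspace rendering of the same translation with a made-up geometry
(256-byte lines, two lines per table, 1024-byte table span, a
hypothetical IDT, and base_offset omitted); the numbers in the comments
belong to this example only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t line_size = 256, num_lines = 2, table_size = 1024;
	const uint32_t line_offset[] = { 0, 2 };	/* hypothetical IDT */
	uint64_t offset = 800;				/* aperture offset */

	uint64_t sub_line_offset = offset % line_size;		/* 32 */
	uint64_t line_no = offset / line_size;			/* 3 */
	uint64_t line_index = line_no % num_lines;		/* 1 */
	uint64_t table_skip_count = line_no / num_lines;	/* 1 */

	/* 2 * 256 + 1 * 1024 + 32 = 1568 */
	printf("%llu\n", (unsigned long long)(line_offset[line_index]
			* line_size + table_skip_count * table_size
			+ sub_line_offset));
	return 0;
}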

libnvdimm, nd_blk: mask off reserved status bits
The "NVDIMM Block Window Driver Writer's Guide":
http://pmem.io/documents/NVDIMM_DriverWritersGuide-July-2016.pdf
...defines the layout of the block window status register. For the July
2016 version of the spec linked to above, this happens in Figure 4 on
page 26.
The only bits defined in this spec are bits 31, 5, 4, 2, 1 and 0. The
rest of the bits in the status register are reserved, and there is a
warning following the diagram that says:
Note: The driver cannot assume the value of the RESERVED bits in the
status register are zero. These reserved bits need to be masked off, and
the driver must avoid checking the state of those bits.
This change ensures that for hardware implementations that set these
reserved bits in the status register, the driver won't incorrectly fail the
block I/Os.
Cc: <stable@vger.kernel.org> #v4.2+
Reviewed-by: Lee, Chun-Yi <jlee@suse.com>
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;
	const u32 STATUS_MASK = 0x80000037;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset) & STATUS_MASK;
}
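
The 0x80000037 constant is exactly the set of defined bits called out
in the guide quoted above; a quick userspace sanity check:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* bits 31, 5, 4, 2, 1 and 0 are defined; the rest are reserved */
	const uint32_t mask = (1u << 31) | (1u << 5) | (1u << 4)
			| (1u << 2) | (1u << 1) | (1u << 0);

	assert(mask == 0x80000037);
	return 0;
}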

nd_blk: change aperture mapping from WC to WB
This should result in a pretty sizeable performance gain for reads. For
rough comparison I did some simple read testing using PMEM to compare
reads of write combining (WC) mappings vs write-back (WB). This was
done on a random lab machine.
PMEM reads from a write combining mapping:
# dd of=/dev/null if=/dev/pmem0 bs=4096 count=100000
100000+0 records in
100000+0 records out
409600000 bytes (410 MB) copied, 9.2855 s, 44.1 MB/s
PMEM reads from a write-back mapping:
# dd of=/dev/null if=/dev/pmem0 bs=4096 count=1000000
1000000+0 records in
1000000+0 records out
4096000000 bytes (4.1 GB) copied, 3.44034 s, 1.2 GB/s
To be able to safely support a write-back aperture I needed to add
support for the "read flush" _DSM flag, as outlined in the DSM spec:
http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf
This flag tells the ND BLK driver that it needs to flush the cache lines
associated with the aperture after the aperture is moved but before any
new data is read. This ensures that any stale cache lines from the
previous contents of the aperture will be discarded from the processor
cache, and the new data will be read properly from the DIMM. We know
that the cache lines are clean and will be discarded without any
writeback because either a) the previous aperture operation was a read,
and we never modified the contents of the aperture, or b) the previous
aperture operation was a write and we must have written back the dirtied
contents of the aperture to the DIMM before the I/O was completed.
In order to add support for the "read flush" flag I needed to add a
generic routine to invalidate cache lines, mmio_flush_range(). This is
protected by the ARCH_HAS_MMIO_FLUSH Kconfig variable, and is currently
only supported on x86.
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	nvdimm_flush(nfit_blk->nd_region, NULL);

	if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}
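
The command word packed above is easier to see with concrete numbers; a
userspace sketch assuming 64-byte cache lines (the shift of 6 is an
assumption for this example, the kernel uses the architecture's
L1_CACHE_SHIFT):

#include <stdint.h>
#include <stdio.h>

#define EX_CACHE_SHIFT 6	/* assumed 64-byte cache lines */

int main(void)
{
	uint64_t dpa = 0x10000, len = 256, write = 1;
	uint64_t cmd;

	/* offset and length in cache-line units, length in bits 48-55,
	 * the write command bit at bit 56
	 */
	cmd = (dpa >> EX_CACHE_SHIFT) & ((1ULL << 48) - 1);	/* 0x400 */
	cmd |= ((len >> EX_CACHE_SHIFT) & ((1ULL << 8) - 1)) << 48;
	cmd |= write << 56;

	printf("0x%llx\n", (unsigned long long)cmd); /* 0x104000000000400 */
	return 0;
}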

static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c);
		else {
			if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
				arch_invalidate_pmem((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy(iobuf + copied, mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		nvdimm_flush(nfit_blk->nd_region, NULL);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}

static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}

static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	if (idt) {
		mmio->num_lines = idt->line_count;
		mmio->line_size = idt->line_size;
		if (interleave_ways == 0)
			return -ENXIO;
		mmio->table_size = mmio->num_lines * interleave_ways
			* mmio->line_size;
	}

	return 0;
}

static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
{
	struct nd_cmd_dimm_flags flags;
	int rc;

	memset(&flags, 0, sizeof(flags));
	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
			sizeof(flags), NULL);

	if (rc >= 0 && flags.status == 0)
		nfit_blk->dimm_flags = flags.flags;
	else if (rc == -ENOTTY) {
		/* fall back to a conservative default */
		nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
		rc = 0;
	} else
		rc = -ENXIO;

	return rc;
}

static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_blk_mmio *mmio;
	struct nfit_blk *nfit_blk;
	struct nfit_mem *nfit_mem;
	struct nvdimm *nvdimm;
	int rc;

	nvdimm = nd_blk_region_to_dimm(ndbr);
	nfit_mem = nvdimm_provider_data(nvdimm);
	if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
		dev_dbg(dev, "missing%s%s%s\n",
				nfit_mem ? "" : " nfit_mem",
				(nfit_mem && nfit_mem->dcr) ? "" : " dcr",
				(nfit_mem && nfit_mem->bdw) ? "" : " bdw");
		return -ENXIO;
	}

	nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
	if (!nfit_blk)
		return -ENOMEM;
	nd_blk_region_set_provider_data(ndbr, nfit_blk);
	nfit_blk->nd_region = to_nd_region(dev);

	/* map block aperture memory */
	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
	mmio = &nfit_blk->mmio[BDW];
	mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
			nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s failed to map bdw\n",
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->bdw->size;
	mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
	mmio->idt = nfit_mem->idt_bdw;
	mmio->spa = nfit_mem->spa_bdw;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
			nfit_mem->memdev_bdw->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s failed to init bdw interleave\n",
				nvdimm_name(nvdimm));
		return rc;
	}

	/* map block control memory */
	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
	mmio = &nfit_blk->mmio[DCR];
	mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address,
			nfit_mem->spa_dcr->length);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s failed to map dcr\n",
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->dcr->window_size;
	mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
	mmio->idt = nfit_mem->idt_dcr;
	mmio->spa = nfit_mem->spa_dcr;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
			nfit_mem->memdev_dcr->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s failed to init dcr interleave\n",
				nvdimm_name(nvdimm));
		return rc;
	}

	rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
	if (rc < 0) {
		dev_dbg(dev, "%s failed get DIMM flags\n",
				nvdimm_name(nvdimm));
		return rc;
	}

	if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (mmio->line_size == 0)
		return 0;

	if ((u32) nfit_blk->cmd_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
		return -ENXIO;
	} else if ((u32) nfit_blk->stat_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "stat_offset crosses interleave boundary\n");
		return -ENXIO;
	}

	return 0;
}
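
The trailing boundary checks exist because an 8-byte control/status
register must not straddle an interleave line; a quick illustration
with a hypothetical 256-byte line:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t line_size = 256;
	uint32_t cmd_offset = 0x1fc;	/* 252 bytes into its line */

	/* 252 + 8 = 260 > 256: the 8-byte register spans two lines */
	printf("%s\n", cmd_offset % line_size + 8 > line_size
			? "crosses interleave boundary" : "ok");
	return 0;
}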

static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
		struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int cmd_rc, rc;

	cmd->address = spa->address;
	cmd->length = spa->length;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
			sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

acpi, nfit: Fix Address Range Scrub completion tracking
The Address Range Scrub implementation tried to skip running scrubs
against ranges that were already scrubbed by the BIOS. Unfortunately
that support also resulted in early scrub completions as evidenced by
this debug output from nfit_test:
nd_region region9: ARS: range 1 short complete
nd_region region3: ARS: range 1 short complete
nd_region region4: ARS: range 2 ARS start (0)
nd_region region4: ARS: range 2 short complete
...i.e. completions without any indication that the scrub was started.
This state of affairs was hard to see in the code due to the
proliferation of state bits and mistakenly trying to track done state
per-range when the completion is a global property of the bus.
So, kill the four ARS state bits (ARS_REQ, ARS_REQ_REDO, ARS_DONE, and
ARS_SHORT), and replace them with just 2 request flags ARS_REQ_SHORT and
ARS_REQ_LONG. The implementation will still complete and reap the
results of BIOS initiated ARS, but it will not attempt to use that
information to affect the completion status of scrubbing the ranges from
a Linux perspective.
Instead, try to synchronously run a short ARS per range at init time and
schedule a long scrub in the background. If ARS is busy with an ARS
request, schedule both a short and a long scrub for when ARS returns to
idle. This logic also satisfies the intent of what ARS_REQ_REDO was
trying to achieve. The new rule is that the REQ flag stays set until the
next successful ars_start() for that range.
With the new policy that the REQ flags are not cleared until the next
start, the implementation no longer loses requests as can be seen from
the following log:
nd_region region3: ARS: range 1 ARS start short (0)
nd_region region9: ARS: range 1 ARS start short (0)
nd_region region3: ARS: range 1 complete
nd_region region4: ARS: range 2 ARS start short (0)
nd_region region9: ARS: range 1 complete
nd_region region9: ARS: range 1 ARS start long (0)
nd_region region4: ARS: range 2 complete
nd_region region3: ARS: range 1 ARS start long (0)
nd_region region9: ARS: range 1 complete
nd_region region3: ARS: range 1 complete
nd_region region4: ARS: range 2 ARS start long (0)
nd_region region4: ARS: range 2 complete
...note that the nfit_test emulated driver provides 2 buses, that is why
some of the range indices are duplicated. Notice that each range
now successfully completes a short and long scrub.
Cc: <stable@vger.kernel.org>
Fixes: 14c73f997a5e ("nfit, address-range-scrub: introduce nfit_spa->ars_state")
Fixes: cc3d3458d46f ("acpi/nfit: queue issuing of ars when an uc error...")
Reported-by: Jacek Zloch <jacek.zloch@intel.com>
Reported-by: Krzysztof Rusocki <krzysztof.rusocki@intel.com>
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

static int ars_start(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa, enum nfit_ars_state req_type)
{
	int rc;
	int cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	memset(&ars_start, 0, sizeof(ars_start));
	ars_start.address = spa->address;
	ars_start.length = spa->length;
	if (req_type == ARS_REQ_SHORT)
		ars_start.flags = ND_ARS_RETURN_PREV_DATA;
	if (nfit_spa_type(spa) == NFIT_SPA_PM)
		ars_start.type = ND_ARS_PERSISTENT;
	else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
		ars_start.type = ND_ARS_VOLATILE;
	else
		return -ENOTTY;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);

	if (rc < 0)
		return rc;
	if (cmd_rc < 0)
		return cmd_rc;
	set_bit(ARS_VALID, &acpi_desc->scrub_flags);
	return 0;
}

static int ars_continue(struct acpi_nfit_desc *acpi_desc)
{
	int rc, cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;

	ars_start = (struct nd_cmd_ars_start) {
		.address = ars_status->restart_address,
		.length = ars_status->restart_length,
		.type = ars_status->type,
	};
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	int rc, cmd_rc;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
			acpi_desc->max_ars, &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
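
Taken with ars_start()/ars_continue(), the status helper supports a
simple poll loop. The sketch below assumes the bus descriptor's status
translation reports an in-progress scrub as -EBUSY and exhausted record
space as -ENOSPC; the real driver drives this from a workqueue under
init_mutex rather than sleeping inline:

/* Schematic ARS flow, not the driver's actual scrub state machine */
static int ars_poll_sketch(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	int rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);

	if (rc < 0)
		return rc;
	for (;;) {
		rc = ars_get_status(acpi_desc);
		if (rc == -EBUSY) {		/* scrub still running */
			msleep(1000);
			continue;
		}
		if (rc == -ENOSPC) {		/* record space exhausted */
			rc = ars_continue(acpi_desc);
			if (rc < 0)
				return rc;
			continue;
		}
		return rc;			/* 0: results are ready */
	}
}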

static void ars_complete(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_region *nd_region = nfit_spa->nd_region;
	struct device *dev;

	lockdep_assert_held(&acpi_desc->init_mutex);
	/*
	 * Only advance the ARS state for ARS runs initiated by the
	 * kernel, ignore ARS results from BIOS initiated runs for scrub
	 * completion tracking.
	 */
	if (acpi_desc->scrub_spa != nfit_spa)
		return;

	if ((ars_status->address >= spa->address && ars_status->address
				< spa->address + spa->length)
			|| (ars_status->address < spa->address)) {
		/*
		 * Assume that if a scrub starts at an offset from the
		 * start of nfit_spa that we are in the continuation
		 * case.
		 *
		 * Otherwise, if the scrub covers the spa range, mark
		 * any pending request complete.
		 */
		if (ars_status->address + ars_status->length
				>= spa->address + spa->length)
			/* complete */;
		else
			return;
	} else
		return;

	acpi_desc->scrub_spa = NULL;
	if (nd_region) {
		dev = nd_region_dev(nd_region);
		nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON);
	} else
		dev = acpi_desc->dev;

	dev_dbg(dev, "ARS: range %d complete\n", spa->range_index);
}
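
Once the reported window is known to touch the range, the completion
test above boils down to "did the scrub reach the end of the spa"; with
hypothetical numbers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool reaches_end(uint64_t ars_addr, uint64_t ars_len,
		uint64_t spa_addr, uint64_t spa_len)
{
	return ars_addr + ars_len >= spa_addr + spa_len;
}

int main(void)
{
	/* covers [0x1000, 0x4000): completes a spa at [0x2000, 0x4000) */
	printf("%d\n", reaches_end(0x1000, 0x3000, 0x2000, 0x2000)); /* 1 */
	/* stops at 0x2800: treated as a continuation, not complete */
	printf("%d\n", reaches_end(0x2000, 0x800, 0x2000, 0x2000));  /* 0 */
	return 0;
}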

static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	int rc;
	u32 i;

	/*
	 * First record starts at 44 byte offset from the start of the
	 * payload.
	 */
	if (ars_status->out_length < 44)
		return 0;

	/*
	 * Ignore potentially stale results that are only refreshed
	 * after a start-ARS event.
	 */
	if (!test_and_clear_bit(ARS_VALID, &acpi_desc->scrub_flags)) {
		dev_dbg(acpi_desc->dev, "skip %d stale records\n",
				ars_status->num_records);
		return 0;
	}

	for (i = 0; i < ars_status->num_records; i++) {
		/* only process full records */
		if (ars_status->out_length
				< 44 + sizeof(struct nd_ars_record) * (i + 1))
			break;
		rc = nvdimm_bus_add_badrange(nvdimm_bus,
				ars_status->records[i].err_address,
				ars_status->records[i].length);
		if (rc)
			return rc;
	}
	if (i < ars_status->num_records)
		dev_warn(acpi_desc->dev, "detected truncated ars results\n");

	return 0;
}
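
Where the magic 44 comes from: mirroring struct nd_cmd_ars_status from
include/uapi/linux/ndctl.h, the fixed header is 48 bytes including the
leading 4-byte status word, so counting out_length from after that word
puts the first record at offset 44. That reading is an inference from
the bounds checks above; the layout below is a userspace mirror for
illustration, not the uapi header itself:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ars_status {
	uint32_t status;
	uint32_t out_length;
	uint64_t address;
	uint64_t length;
	uint64_t restart_address;
	uint64_t restart_length;
	uint16_t type;
	uint16_t flags;
	uint32_t num_records;
	struct {
		uint32_t handle;
		uint32_t reserved;
		uint64_t err_address;
		uint64_t length;
	} __attribute__((packed)) records[];
} __attribute__((packed));

int main(void)
{
	/* prints 48; 48 - sizeof(status) matches the 44-byte check */
	printf("records offset: %zu\n",
			offsetof(struct ars_status, records));
	return 0;
}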
|
|
|
|
|
ACPI: Change NFIT driver to insert new resource
ACPI 6 defines persistent memory (PMEM) ranges in multiple
firmware interfaces, e820, EFI, and ACPI NFIT table. This EFI
change, however, leads to hit a bug in the grub bootloader, which
treats EFI_PERSISTENT_MEMORY type as regular memory and corrupts
stored user data [1].
Therefore, BIOS may set generic reserved type in e820 and EFI to
cover PMEM ranges. The kernel can initialize PMEM ranges from
ACPI NFIT table alone.
This scheme causes a problem in the iomem table, though. On x86,
for instance, e820_reserve_resources() initializes top-level entries
(iomem_resource.child) from the e820 table at early boot-time.
This creates "reserved" entry for a PMEM range, which does not allow
region_intersects() to check with PMEM type.
Change acpi_nfit_register_region() to call acpi_nfit_insert_resource(),
which calls insert_resource() to insert a PMEM entry from NFIT when
the iomem table does not have a PMEM entry already. That is, when
a PMEM range is marked as reserved type in e820, it inserts
"Persistent Memory" entry, which results as follows.
+ "Persistent Memory"
+ "reserved"
This allows the EINJ driver, which calls region_intersects() to check
PMEM ranges, to work continuously even if BIOS sets reserved type
(or sets nothing) to PMEM ranges in e820 and EFI.
[1]: https://lists.gnu.org/archive/html/grub-devel/2015-11/msg00209.html
Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Borislav Petkov <bp@suse.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2016-03-10 03:47:06 +08:00
|
|
|
static void acpi_nfit_remove_resource(void *data)
|
|
|
|
{
|
|
|
|
struct resource *res = data;
|
|
|
|
|
|
|
|
remove_resource(res);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
|
|
|
|
struct nd_region_desc *ndr_desc)
|
|
|
|
{
|
|
|
|
struct resource *res, *nd_res = ndr_desc->res;
|
|
|
|
int is_pmem, ret;
|
|
|
|
|
|
|
|
/* No operation if the region is already registered as PMEM */
|
|
|
|
is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
|
|
|
|
IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
|
|
|
|
if (is_pmem == REGION_INTERSECTS)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
|
|
|
|
if (!res)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
res->name = "Persistent Memory";
|
|
|
|
res->start = nd_res->start;
|
|
|
|
res->end = nd_res->end;
|
|
|
|
res->flags = IORESOURCE_MEM;
|
|
|
|
res->desc = IORES_DESC_PERSISTENT_MEMORY;
|
|
|
|
|
|
|
|
ret = insert_resource(&iomem_resource, res);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2016-07-04 12:32:51 +08:00
|
|
|
ret = devm_add_action_or_reset(acpi_desc->dev,
|
|
|
|
acpi_nfit_remove_resource,
|
|
|
|
res);
|
|
|
|
if (ret)
|
		return ret;

	return 0;
}
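The devm_add_action_or_reset() call above ties removal of the inserted
resource to the lifetime of the ACPI device. A minimal sketch of the same
pattern, with hypothetical names, outside the NFIT context:

#include <linux/device.h>
#include <linux/slab.h>

static void example_release(void *data)		/* hypothetical */
{
	kfree(data);
}

static int example_bind(struct device *dev)	/* hypothetical */
{
	void *buf = kmalloc(64, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* On failure this frees buf immediately; otherwise example_release()
	 * runs automatically when dev is unbound. */
	return devm_add_action_or_reset(dev, example_release, buf);
}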
libnvdimm, nfit: regions (block-data-window, persistent memory, volatile memory)
A "region" device represents the maximum capacity of a BLK range (mmio
block-data-window(s)), or a PMEM range (DAX-capable persistent memory or
volatile memory), without regard for aliasing. Aliasing, in the
dimm-local address space (DPA), is resolved by metadata on a dimm to
designate which exclusive interface will access the aliased DPA ranges.
Support for the per-dimm metadata/label area arrives in a subsequent
patch.
The name format of "region" devices is "regionN" where, like dimms, N is
a global ida index assigned at discovery time. This id is not reliable
across reboots nor in the presence of hotplug. Look to attributes of
the region or static id-data of the sub-namespace to generate a
persistent name. However, if the platform configuration does not change
it is reasonable to expect the same region id to be assigned at the next
boot.
"region"s have two generic attributes, "size" and "mapping"s, where:
- size: the BLK-accessible capacity, or the span of the
  system physical address range in the case of PMEM.
- mappingN: a tuple describing a dimm's contribution to the region's
  capacity in the format (<nmemX>,<dpa>,<size>). For a PMEM-region
  there will be at least one mapping per dimm in the interleave set. For
  a BLK-region there is only "mapping0", listing the starting DPA of the
  BLK-region and the available DPA capacity of that space (matches "size"
  above).
The max number of mappings per "region" is hard coded per the
constraints of sysfs attribute groups. That said, the number of mappings
per region should never exceed the maximum number of possible dimms in
the system. If the current number turns out not to be enough then the
"mappings" attribute clarifies how many there are supposed to be. "32
should be enough for anybody...".
Cc: Neil Brown <neilb@suse.de>
Cc: <linux-acpi@vger.kernel.org>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Robert Moore <robert.moore@intel.com>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Toshi Kani <toshi.kani@hp.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2015-06-10 08:13:14 +08:00
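As a usage illustration, a small user-space sketch (the paths and the
region number are assumptions) that reads a region's "size" and
"mapping0" attributes:

#include <stdio.h>

int main(void)
{
	const char *attrs[] = {
		"/sys/bus/nd/devices/region0/size",	/* assumed region0 */
		"/sys/bus/nd/devices/region0/mapping0",
	};
	char buf[128];

	for (int i = 0; i < 2; i++) {
		FILE *f = fopen(attrs[i], "r");

		if (f && fgets(buf, sizeof(buf), f))
			printf("%s: %s", attrs[i], buf); /* e.g. nmem0,0,0x20000000 */
		if (f)
			fclose(f);
	}
	return 0;
}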
static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
		struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
		struct acpi_nfit_memory_map *memdev,
		struct nfit_spa *nfit_spa)
{
	struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
			memdev->device_handle);
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc *ndbr_desc;
	struct nfit_mem *nfit_mem;
	int rc;

	if (!nvdimm) {
		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
				spa->range_index, memdev->device_handle);
		return -ENODEV;
	}

	mapping->nvdimm = nvdimm;
	switch (nfit_spa_type(spa)) {
	case NFIT_SPA_PM:
	case NFIT_SPA_VOLATILE:
		mapping->start = memdev->address;
		mapping->size = memdev->region_size;
		break;
	case NFIT_SPA_DCR:
		nfit_mem = nvdimm_provider_data(nvdimm);
		if (!nfit_mem || !nfit_mem->bdw) {
			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
					spa->range_index, nvdimm_name(nvdimm));
			break;
		}

		mapping->size = nfit_mem->bdw->capacity;
		mapping->start = nfit_mem->bdw->start_address;
		ndr_desc->num_lanes = nfit_mem->bdw->windows;
		ndr_desc->mapping = mapping;
		ndr_desc->num_mappings = 1;
		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr_desc->enable = acpi_nfit_blk_region_enable;
		ndbr_desc->do_io = acpi_desc->blk_do_io;
		rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
		if (rc)
			return rc;
		nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			return -ENOMEM;
		break;
	}

	return 0;
}
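For the PM/VOLATILE case above, the resulting descriptor is just the
(<nmemX>,<dpa>,<size>) tuple in struct form; a sketch with assumed
example values:

/* Sketch only: example values, not taken from a real NFIT. */
struct nd_mapping_desc example_mapping = {
	.nvdimm = NULL,			/* looked up via acpi_nfit_dimm_by_handle() */
	.start = 0x100000000ULL,	/* memdev->address */
	.size = 0x40000000ULL,		/* memdev->region_size */
};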
acpi, nfit: treat virtual ramdisk SPA as pmem region
This patch adds logic to treat a virtual ramdisk SPA as a pmem region, so
the ramdisk's /dev/pmem* device can be mounted with iso9660.
This is useful with the HTTPBoot feature in EFI firmware, which pulls a
remote ISO file into a local memory region for booting and installation.
Wiki page of UEFI HTTPBoot with OVMF:
https://en.opensuse.org/UEFI_HTTPBoot_with_OVMF
The ramdisk function in EDK2/OVMF generates an ACPI0012 root device that
contains a _STA method but no _DSM:
DefinitionBlock ("ssdt2.aml", "SSDT", 2, "INTEL ", "RamDisk ", 0x00001000)
{
    Scope (\_SB)
    {
        Device (NVDR)
        {
            Name (_HID, "ACPI0012") // _HID: Hardware ID
            Name (_STR, Unicode ("NVDIMM Root Device")) // _STR: Description String
            Method (_STA, 0, NotSerialized) // _STA: Status
            {
                Return (0x0F)
            }
        }
    }
}
Section 5.2.25.2 of the ACPI 6.1 spec mentions that the "SPA Range
Structure Index" of a virtual SPA shall be set to zero, which means a
virtual SPA will not be associated with any NVDIMM region mapping.
The VCD's SPA Range Structure in the NFIT is similar to the virtual disk
region, as follows:
[028h 0040 2] Subtable Type : 0000 [System Physical Address Range]
[02Ah 0042 2] Length : 0038
[02Ch 0044 2] Range Index : 0000
[02Eh 0046 2] Flags (decoded below) : 0000
Add/Online Operation Only : 0
Proximity Domain Valid : 0
[030h 0048 4] Reserved : 00000000
[034h 0052 4] Proximity Domain : 00000000
[038h 0056 16] Address Range GUID : 77AB535A-45FC-624B-5560-F7B281D1F96E
[048h 0072 8] Address Range Base : 00000000B6ABD018
[050h 0080 8] Address Range Length : 0000000005500000
[058h 0088 8] Memory Map Attribute : 0000000000000000
The way to leave a SPA range unassociated is to never reference it from a
"flush hint", "interleave", or "control region" table.
After testing on OVMF, the pmem driver can support a region that is not
associated with any NVDIMM mapping, so treating VCD like pmem is a
workable way to get a pmem block device that contains the ISO.
v4:
Introduce nfit_spa_is_virtual() to check for a virtual ramdisk SPA and
create a pmem region.
v3:
To simplify the patch, removed the useless VCD region in libnvdimm.
v2:
Removed the code for setting VCD to a read-only region.
Cc: Gary Lin <GLin@suse.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Linda Knippers <linda.knippers@hpe.com>
Signed-off-by: Lee, Chun-Yi <jlee@suse.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2016-07-15 12:05:35 +08:00
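A short user-space sketch of the use case this enables; the device node
and mount point are assumptions:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Assumes the ramdisk surfaced as /dev/pmem0 and /mnt/iso exists. */
	if (mount("/dev/pmem0", "/mnt/iso", "iso9660", MS_RDONLY, NULL)) {
		perror("mount");
		return 1;
	}
	return 0;
}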
static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
{
	return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
			nfit_spa_type(spa) == NFIT_SPA_VCD ||
			nfit_spa_type(spa) == NFIT_SPA_PDISK ||
			nfit_spa_type(spa) == NFIT_SPA_PCD);
}
2017-05-30 14:12:19 +08:00
static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa)
{
	return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
			nfit_spa_type(spa) == NFIT_SPA_VCD ||
			nfit_spa_type(spa) == NFIT_SPA_VOLATILE);
}
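A minimal sketch (hypothetical caller, not from this file) of how these
predicates are meant to steer region setup: a virtual range still gets a
pmem-style region even though it has no NVDIMM mappings, while a volatile
range must not be treated as persistent:

/* Hypothetical helper illustrating the two classifications. */
static void example_classify(struct acpi_nfit_system_address *spa)
{
	if (nfit_spa_is_virtual(spa))
		pr_debug("spa%d: virtual, no NVDIMM mappings required\n",
				spa->range_index);
	if (nfit_spa_is_volatile(spa))
		pr_debug("spa%d: contents are not persistent\n",
				spa->range_index);
}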
static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc ndbr_desc;
	struct nd_region_desc *ndr_desc;
	struct nfit_memdev *nfit_memdev;
	struct nvdimm_bus *nvdimm_bus;
	struct resource res;
	int count = 0, rc;

	if (nfit_spa->nd_region)
		return 0;

	if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
		dev_dbg(acpi_desc->dev, "detected invalid spa index\n");
		return 0;
	}

	memset(&res, 0, sizeof(res));
	memset(&mappings, 0, sizeof(mappings));
	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
	res.start = spa->address;
	res.end = res.start + spa->length - 1;
	ndr_desc = &ndbr_desc.ndr_desc;
	ndr_desc->res = &res;
	ndr_desc->provider_data = nfit_spa;
	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) {
		ndr_desc->numa_node = acpi_map_pxm_to_online_node(
				spa->proximity_domain);
ACPI: Do not create new NUMA domains from ACPI static tables that are not SRAT
Several ACPI static tables contain references to proximity domains.
ACPI 6.3 has clarified that only entries in SRAT may define a new
domain (sec 5.2.16).
Those tables described in the ACPI spec have additional clarifying text.
NFIT: Table 5-132,
"Integer that represents the proximity domain to which the memory
belongs. This number must match with corresponding entry in the
SRAT table."
HMAT: Table 5-145,
"... This number must match with the corresponding entry in the SRAT
table's processor affinity structure ... if the initiator is a processor,
or the Generic Initiator Affinity Structure if the initiator is a generic
initiator".
IORT and DMAR are defined by external specifications.
Intel Virtualization Technology for Directed I/O Rev 3.1 does not make any
explicit statements, but the general SRAT statement above will still apply.
https://software.intel.com/sites/default/files/managed/c5/15/vt-directed-io-spec.pdf
The IO Remapping Table, Platform Design Document rev D, likewise makes no
explicit statement, but refers to the ACPI SRAT table for more
information, and again the generic SRAT statement above applies.
https://developer.arm.com/documentation/den0049/d/
In conclusion, any proximity domain specified in these tables should be a
reference to a proximity domain also found in SRAT, and should not be
able to instantiate a new domain. Hence we switch to pxm_to_node(), which
will only return existing nodes.
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Reviewed-by: Barry Song <song.bao.hua@hisilicon.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2020-08-18 22:24:26 +08:00
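		/*
		 * acpi_map_pxm_to_online_node() above may fall back to the
		 * nearest online node, while pxm_to_node() below only
		 * returns a node that SRAT already defined (or
		 * NUMA_NO_NODE), so the target node never instantiates a
		 * new NUMA domain.
		 */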
		ndr_desc->target_node = pxm_to_node(spa->proximity_domain);
	} else {
		ndr_desc->numa_node = NUMA_NO_NODE;
		ndr_desc->target_node = NUMA_NO_NODE;
	}

	/*
	 * Persistence domain bits are hierarchical, if
	 * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
	 * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied.
	 */
	if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
		set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);
	else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
		set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);

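	/*
	 * These region flags surface to user space through the region's
	 * "persistence_domain" sysfs attribute ("cpu_cache" for
	 * ND_REGION_PERSIST_CACHE, "memory_controller" for
	 * ND_REGION_PERSIST_MEMCTRL).
	 */
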
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nd_mapping_desc *mapping;

		if (memdev->range_index != spa->range_index)
			continue;
		if (count >= ND_MAX_MAPPINGS) {
			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
					spa->range_index, ND_MAX_MAPPINGS);
			return -ENXIO;
		}
		mapping = &mappings[count++];
		rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
				memdev, nfit_spa);
		if (rc)
			goto out;
	}

	ndr_desc->mapping = mappings;
	ndr_desc->num_mappings = count;
	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
	if (rc)
		goto out;

|
|
|
nvdimm_bus = acpi_desc->nvdimm_bus;
|
|
|
|
if (nfit_spa_type(spa) == NFIT_SPA_PM) {
|
ACPI: Change NFIT driver to insert new resource
ACPI 6 defines persistent memory (PMEM) ranges in multiple
firmware interfaces: e820, EFI, and the ACPI NFIT table. The EFI
change, however, hits a bug in the grub bootloader, which
treats the EFI_PERSISTENT_MEMORY type as regular memory and corrupts
stored user data [1].
Therefore, BIOS may set a generic reserved type in e820 and EFI to
cover PMEM ranges. The kernel can initialize PMEM ranges from
the ACPI NFIT table alone.
This scheme causes a problem in the iomem table, though. On x86,
for instance, e820_reserve_resources() initializes top-level entries
(iomem_resource.child) from the e820 table at early boot time.
This creates a "reserved" entry for a PMEM range, which does not allow
region_intersects() to check against the PMEM type.
Change acpi_nfit_register_region() to call acpi_nfit_insert_resource(),
which calls insert_resource() to insert a PMEM entry from the NFIT when
the iomem table does not already have a PMEM entry. That is, when
a PMEM range is marked as reserved type in e820, it inserts a
"Persistent Memory" entry, which results in the following layout:
 + "Persistent Memory"
   + "reserved"
This allows the EINJ driver, which calls region_intersects() to check
PMEM ranges, to keep working even if BIOS sets the reserved type
(or sets nothing) for PMEM ranges in e820 and EFI.
[1]: https://lists.gnu.org/archive/html/grub-devel/2015-11/msg00209.html
Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Borislav Petkov <bp@suse.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2016-03-10 03:47:06 +08:00
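The changelog's check-then-insert flow can be sketched as below. This is a minimal illustration, not the driver's exact code: the helper name and the struct resource input are assumptions, while region_intersects(), insert_resource(), and iomem_resource are the kernel interfaces the changelog names, and IORES_DESC_PERSISTENT_MEMORY is the kernel's resource descriptor for PMEM.

/* Sketch: add a "Persistent Memory" iomem entry only when the range is
 * not already tracked as PMEM; 'dev' and 'range' are assumed inputs.
 */
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/slab.h>

static int example_insert_pmem_resource(struct device *dev,
		struct resource *range)
{
	struct resource *res;

	/* Already visible to region_intersects() as PMEM? Nothing to do. */
	if (region_intersects(range->start, resource_size(range),
			IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY)
			== REGION_INTERSECTS)
		return 0;

	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Persistent Memory";
	res->start = range->start;
	res->end = range->end;
	res->flags = IORESOURCE_MEM;
	res->desc = IORES_DESC_PERSISTENT_MEMORY;

	/* May end up as the parent of an existing e820 "reserved" entry */
	return insert_resource(&iomem_resource, res);
}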
|
|
|
rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
|
2016-03-10 09:15:43 +08:00
|
|
|
if (rc) {
|
ACPI: Change NFIT driver to insert new resource
2016-03-10 03:47:06 +08:00
|
|
|
dev_warn(acpi_desc->dev,
|
|
|
|
"failed to insert pmem resource to iomem: %d\n",
|
|
|
|
rc);
|
2016-03-10 09:15:43 +08:00
|
|
|
goto out;
|
2015-12-25 10:21:43 +08:00
|
|
|
}
|
2016-03-10 09:15:43 +08:00
|
|
|
|
2016-02-18 05:01:23 +08:00
|
|
|
nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
|
|
|
|
ndr_desc);
|
|
|
|
if (!nfit_spa->nd_region)
|
|
|
|
rc = -ENOMEM;
|
2017-05-30 14:12:19 +08:00
|
|
|
} else if (nfit_spa_is_volatile(spa)) {
|
2016-02-18 05:01:23 +08:00
|
|
|
nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
|
|
|
|
ndr_desc);
|
|
|
|
if (!nfit_spa->nd_region)
|
|
|
|
rc = -ENOMEM;
|
acpi, nfit: treat virtual ramdisk SPA as pmem region
This patch adds logic to treat a virtual ramdisk SPA as a pmem region, so
the ramdisk's /dev/pmem* device can be mounted with iso9660.
It's useful for working with httpboot in EFI firmware to pull a remote
ISO file into a local memory region for booting and installation.
Wiki page of UEFI HTTPBoot with OVMF:
https://en.opensuse.org/UEFI_HTTPBoot_with_OVMF
The ramdisk function in EDK2/OVMF generates an ACPI0012 root device that
contains a _STA method but no _DSM:
DefinitionBlock ("ssdt2.aml", "SSDT", 2, "INTEL ", "RamDisk ", 0x00001000)
{
Scope (\_SB)
{
Device (NVDR)
{
Name (_HID, "ACPI0012") // _HID: Hardware ID
Name (_STR, Unicode ("NVDIMM Root Device")) // _STR: Description String
Method (_STA, 0, NotSerialized) // _STA: Status
{
Return (0x0F)
}
}
}
}
Section 5.2.25.2 of the ACPI 6.1 spec mentions that the "SPA Range
Structure Index" of a virtual SPA shall be set to zero. That means a
virtual SPA will not be associated with any NVDIMM region mapping.
The VCD's SPA Range Structure in the NFIT is similar to a virtual disk
region, as follows:
[028h 0040 2] Subtable Type : 0000 [System Physical Address Range]
[02Ah 0042 2] Length : 0038
[02Ch 0044 2] Range Index : 0000
[02Eh 0046 2] Flags (decoded below) : 0000
Add/Online Operation Only : 0
Proximity Domain Valid : 0
[030h 0048 4] Reserved : 00000000
[034h 0052 4] Proximity Domain : 00000000
[038h 0056 16] Address Range GUID : 77AB535A-45FC-624B-5560-F7B281D1F96E
[048h 0072 8] Address Range Base : 00000000B6ABD018
[050h 0080 8] Address Range Length : 0000000005500000
[058h 0088 8] Memory Map Attribute : 0000000000000000
The way to not associate a SPA range is to never reference it from a "flush hint",
"interleave", or "control region" table.
After testing on OVMF, the pmem driver can support a region that is not
associated with any NVDIMM mapping. So, treating the VCD like pmem is a
way to get a pmem block device that contains an ISO.
v4:
Introduce nfit_spa_is_virtual() to check for a virtual ramdisk SPA and
create a pmem region.
v3:
To simplify the patch, removed the useless VCD region in libnvdimm.
v2:
Removed the code for setting the VCD to a read-only region.
Cc: Gary Lin <GLin@suse.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Linda Knippers <linda.knippers@hpe.com>
Signed-off-by: Lee, Chun-Yi <jlee@suse.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2016-07-15 12:05:35 +08:00
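Below is one plausible shape for the nfit_spa_is_virtual() helper used in the branch that follows. Treat it as a sketch under stated assumptions: the NFIT_SPA_VDISK/VCD/PDISK/PCD type constants are assumed from the driver's SPA-type table in nfit.h, and only the classification idea is the point.

/* Sketch: a SPA range counts as "virtual" (disk/CD image backed) when
 * its address-range GUID maps to one of the virtual/persistent
 * disk/CD types; the NFIT_SPA_* constants are assumed from nfit.h.
 */
static bool example_spa_is_virtual(struct acpi_nfit_system_address *spa)
{
	int type = nfit_spa_type(spa);

	return type == NFIT_SPA_VDISK || type == NFIT_SPA_VCD
		|| type == NFIT_SPA_PDISK || type == NFIT_SPA_PCD;
}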
|
|
|
} else if (nfit_spa_is_virtual(spa)) {
|
|
|
|
nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
|
|
|
|
ndr_desc);
|
|
|
|
if (!nfit_spa->nd_region)
|
|
|
|
rc = -ENOMEM;
|
libnvdimm, nfit: regions (block-data-window, persistent memory, volatile memory)
2015-06-10 08:13:14 +08:00
|
|
|
}
|
2015-10-28 06:58:27 +08:00
|
|
|
|
2016-02-18 05:01:23 +08:00
|
|
|
out:
|
|
|
|
if (rc)
|
|
|
|
dev_err(acpi_desc->dev, "failed to register spa range %d\n",
|
|
|
|
nfit_spa->spa->range_index);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2018-04-05 16:25:02 +08:00
|
|
|
static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc)
|
2016-02-18 05:01:23 +08:00
|
|
|
{
|
|
|
|
struct device *dev = acpi_desc->dev;
|
|
|
|
struct nd_cmd_ars_status *ars_status;
|
|
|
|
|
2018-04-05 16:25:02 +08:00
|
|
|
if (acpi_desc->ars_status) {
|
|
|
|
memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
|
2016-02-18 05:01:23 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-04-05 16:25:02 +08:00
|
|
|
ars_status = devm_kzalloc(dev, acpi_desc->max_ars, GFP_KERNEL);
|
2016-02-18 05:01:23 +08:00
|
|
|
if (!ars_status)
|
|
|
|
return -ENOMEM;
|
|
|
|
acpi_desc->ars_status = ars_status;
|
libnvdimm, nfit: regions (block-data-window, persistent memory, volatile memory)
2015-06-10 08:13:14 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-04-05 16:25:02 +08:00
|
|
|
static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc)
|
2016-02-18 05:01:23 +08:00
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
|
2018-04-05 16:25:02 +08:00
|
|
|
if (ars_status_alloc(acpi_desc))
|
2016-02-18 05:01:23 +08:00
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
rc = ars_get_status(acpi_desc);
|
2018-04-06 07:18:55 +08:00
|
|
|
|
2016-02-18 05:01:23 +08:00
|
|
|
if (rc < 0 && rc != -ENOSPC)
|
|
|
|
return rc;
|
|
|
|
|
2018-04-05 16:25:02 +08:00
|
|
|
if (ars_status_process_records(acpi_desc))
|
2018-11-01 15:30:22 +08:00
|
|
|
dev_err(acpi_desc->dev, "Failed to process ARS records\n");
|
2016-02-18 05:01:23 +08:00
|
|
|
|
2018-11-01 15:30:22 +08:00
|
|
|
return rc;
|
2016-02-18 05:01:23 +08:00
|
|
|
}
|
|
|
|
|
acpi, nfit: Fix Address Range Scrub completion tracking
The Address Range Scrub implementation tried to skip running scrubs
against ranges that were already scrubbed by the BIOS. Unfortunately
that support also resulted in early scrub completions as evidenced by
this debug output from nfit_test:
nd_region region9: ARS: range 1 short complete
nd_region region3: ARS: range 1 short complete
nd_region region4: ARS: range 2 ARS start (0)
nd_region region4: ARS: range 2 short complete
...i.e. completions without any indications that the scrub was started.
This state of affairs was hard to see in the code due to the
proliferation of state bits and mistakenly trying to track done state
per-range when the completion is a global property of the bus.
So, kill the four ARS state bits (ARS_REQ, ARS_REQ_REDO, ARS_DONE, and
ARS_SHORT), and replace them with just two request flags, ARS_REQ_SHORT and
ARS_REQ_LONG. The implementation will still complete and reap the
results of BIOS initiated ARS, but it will not attempt to use that
information to affect the completion status of scrubbing the ranges from
a Linux perspective.
Instead, try to synchronously run a short ARS per range at init time and
schedule a long scrub in the background. If ARS is busy with an ARS
request, schedule both a short and a long scrub for when ARS returns to
idle. This logic also satisfies the intent of what ARS_REQ_REDO was
trying to achieve. The new rule is that the REQ flag stays set until the
next successful ars_start() for that range.
With the new policy that the REQ flags are not cleared until the next
start, the implementation no longer loses requests as can be seen from
the following log:
nd_region region3: ARS: range 1 ARS start short (0)
nd_region region9: ARS: range 1 ARS start short (0)
nd_region region3: ARS: range 1 complete
nd_region region4: ARS: range 2 ARS start short (0)
nd_region region9: ARS: range 1 complete
nd_region region9: ARS: range 1 ARS start long (0)
nd_region region4: ARS: range 2 complete
nd_region region3: ARS: range 1 ARS start long (0)
nd_region region9: ARS: range 1 complete
nd_region region3: ARS: range 1 complete
nd_region region4: ARS: range 2 ARS start long (0)
nd_region region4: ARS: range 2 complete
...note that the nfit_test emulated driver provides two buses, which is why
some of the range indices are duplicated. Notice that each range
now successfully completes a short and a long scrub.
Cc: <stable@vger.kernel.org>
Fixes: 14c73f997a5e ("nfit, address-range-scrub: introduce nfit_spa->ars_state")
Fixes: cc3d3458d46f ("acpi/nfit: queue issuing of ars when an uc error...")
Reported-by: Jacek Zloch <jacek.zloch@intel.com>
Reported-by: Krzysztof Rusocki <krzysztof.rusocki@intel.com>
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2018-10-14 11:32:17 +08:00
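The "REQ flag stays set until the next successful ars_start()" rule reads as a latching protocol on the per-range ars_state bitmap. The sketch below illustrates that protocol with hypothetical helper names; it mirrors, rather than reproduces, the driver logic that follows.

/* Sketch: a request bit is latched with set_bit() and only cleared once
 * ars_start() succeeds for that range, so a busy ARS engine cannot
 * drop a request; it stays pending until the next idle window.
 */
static void example_ars_request(struct nfit_spa *nfit_spa,
		enum nfit_ars_state req_type)
{
	set_bit(req_type, &nfit_spa->ars_state);	/* latch the request */
}

static int example_ars_try_start(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa, enum nfit_ars_state req_type)
{
	int rc = ars_start(acpi_desc, nfit_spa, req_type);

	if (rc == 0)
		clear_bit(req_type, &nfit_spa->ars_state);	/* consumed */
	/* on -EBUSY the bit stays set; the scrub worker retries later */
	return rc;
}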
|
|
|
static int ars_register(struct acpi_nfit_desc *acpi_desc,
|
|
|
|
struct nfit_spa *nfit_spa)
|
2016-02-18 05:01:23 +08:00
|
|
|
{
|
acpi, nfit: Fix Address Range Scrub completion tracking
2018-10-14 11:32:17 +08:00
|
|
|
int rc;
|
2016-02-18 05:01:23 +08:00
|
|
|
|
2019-02-14 01:01:13 +08:00
|
|
|
if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
|
2018-04-03 06:28:03 +08:00
|
|
|
return acpi_nfit_register_region(acpi_desc, nfit_spa);
|
2016-02-18 05:01:23 +08:00
|
|
|
|
acpi, nfit: Fix Address Range Scrub completion tracking
2018-10-14 11:32:17 +08:00
|
|
|
set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
|
2019-02-14 01:01:13 +08:00
|
|
|
if (!no_init_ars)
|
|
|
|
set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
|
2016-02-18 05:01:23 +08:00
|
|
|
|
acpi, nfit: Fix Address Range Scrub completion tracking
2018-10-14 11:32:17 +08:00
|
|
|
switch (acpi_nfit_query_poison(acpi_desc)) {
|
2018-04-06 07:18:55 +08:00
|
|
|
case 0:
|
2019-02-14 00:58:40 +08:00
|
|
|
case -ENOSPC:
|
2018-04-06 07:18:55 +08:00
|
|
|
case -EAGAIN:
|
acpi, nfit: Fix Address Range Scrub completion tracking
2018-10-14 11:32:17 +08:00
|
|
|
rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
|
|
|
|
/* shouldn't happen, try again later */
|
|
|
|
if (rc == -EBUSY)
|
2016-02-18 05:01:23 +08:00
|
|
|
break;
|
acpi, nfit: Fix Address Range Scrub completion tracking
2018-10-14 11:32:17 +08:00
|
|
|
if (rc) {
|
2018-04-06 07:18:55 +08:00
|
|
|
set_bit(ARS_FAILED, &nfit_spa->ars_state);
|
|
|
|
break;
|
2016-02-18 05:01:23 +08:00
|
|
|
}
|
acpi, nfit: Fix Address Range Scrub completion tracking
2018-10-14 11:32:17 +08:00
|
|
|
clear_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
|
|
|
|
rc = acpi_nfit_query_poison(acpi_desc);
|
|
|
|
if (rc)
|
|
|
|
break;
|
|
|
|
acpi_desc->scrub_spa = nfit_spa;
|
|
|
|
ars_complete(acpi_desc, nfit_spa);
|
|
|
|
/*
|
|
|
|
* If ars_complete() says we didn't complete the
|
|
|
|
* short scrub, we'll try again with a long
|
|
|
|
* request.
|
|
|
|
*/
|
|
|
|
acpi_desc->scrub_spa = NULL;
|
2016-02-18 05:01:23 +08:00
|
|
|
break;
|
2018-04-06 07:18:55 +08:00
|
|
|
case -EBUSY:
|
acpi, nfit: Fix Address Range Scrub completion tracking
2018-10-14 11:32:17 +08:00
|
|
|
case -ENOMEM:
|
|
|
|
/*
|
|
|
|
* BIOS was using ARS, wait for it to complete (or
|
|
|
|
* resources to become available) and then perform our
|
|
|
|
* own scrubs.
|
|
|
|
*/
|
2016-02-18 05:01:23 +08:00
|
|
|
break;
|
2018-04-06 07:18:55 +08:00
|
|
|
default:
|
|
|
|
set_bit(ARS_FAILED, &nfit_spa->ars_state);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return acpi_nfit_register_region(acpi_desc, nfit_spa);
|
2016-02-18 05:01:23 +08:00
|
|
|
}
|
|
|
|
|
2018-04-06 07:18:55 +08:00
|
|
|
static void ars_complete_all(struct acpi_nfit_desc *acpi_desc)
|
libnvdimm, nfit: regions (block-data-window, persistent memory, volatile memory)
2015-06-10 08:13:14 +08:00
|
|
|
{
|
|
|
|
struct nfit_spa *nfit_spa;
|
2016-02-18 05:01:23 +08:00
|
|
|
|
libnvdimm, nfit: regions (block-data-window, persistent memory, volatile memory)
2015-06-10 08:13:14 +08:00
|
|
|
list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
|
2018-04-06 07:18:55 +08:00
|
|
|
if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
|
2016-02-18 05:01:23 +08:00
|
|
|
continue;
|
2018-04-06 07:18:55 +08:00
|
|
|
ars_complete(acpi_desc, nfit_spa);
|
|
|
|
}
|
|
|
|
}
|
2016-02-18 05:01:23 +08:00
|
|
|
|
2018-04-06 07:18:55 +08:00
|
|
|
static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
|
|
|
|
int query_rc)
|
|
|
|
{
|
|
|
|
unsigned int tmo = acpi_desc->scrub_tmo;
|
|
|
|
struct device *dev = acpi_desc->dev;
|
|
|
|
struct nfit_spa *nfit_spa;
|
2016-02-18 05:01:23 +08:00
|
|
|
|
acpi, nfit: Fix Address Range Scrub completion tracking
2018-10-14 11:32:17 +08:00
|
|
|
lockdep_assert_held(&acpi_desc->init_mutex);
|
|
|
|
|
2019-02-14 01:57:22 +08:00
|
|
|
if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags))
|
2018-04-06 07:18:55 +08:00
|
|
|
return 0;
|
2016-02-18 05:01:23 +08:00
|
|
|
|
2018-04-06 07:18:55 +08:00
|
|
|
if (query_rc == -EBUSY) {
|
|
|
|
dev_dbg(dev, "ARS: ARS busy\n");
|
|
|
|
return min(30U * 60U, tmo * 2);
|
|
|
|
}
|
|
|
|
if (query_rc == -ENOSPC) {
|
|
|
|
dev_dbg(dev, "ARS: ARS continue\n");
|
|
|
|
ars_continue(acpi_desc);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
if (query_rc && query_rc != -EAGAIN) {
|
|
|
|
unsigned long long addr, end;
|
2016-02-18 05:01:23 +08:00
|
|
|
|
2018-04-06 07:18:55 +08:00
|
|
|
addr = acpi_desc->ars_status->address;
|
|
|
|
end = addr + acpi_desc->ars_status->length;
|
|
|
|
dev_dbg(dev, "ARS: %llx-%llx failed (%d)\n", addr, end,
|
|
|
|
query_rc);
|
libnvdimm, nfit: regions (block-data-window, persistent memory, volatile memory)
2015-06-10 08:13:14 +08:00
|
|
|
}
|
2016-02-18 05:01:23 +08:00
|
|
|
|
2018-04-06 07:18:55 +08:00
|
|
|
ars_complete_all(acpi_desc);
|
2016-02-18 05:01:23 +08:00
|
|
|
list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
|
acpi, nfit: Fix Address Range Scrub completion tracking
2018-10-14 11:32:17 +08:00
|
|
|
enum nfit_ars_state req_type;
|
|
|
|
int rc;
|
|
|
|
|
2018-04-06 07:18:55 +08:00
|
|
|
if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
|
|
|
|
continue;
|
acpi, nfit: Fix Address Range Scrub completion tracking
The Address Range Scrub implementation tried to skip running scrubs
against ranges that were already scrubbed by the BIOS. Unfortunately
that support also resulted in early scrub completions as evidenced by
this debug output from nfit_test:
nd_region region9: ARS: range 1 short complete
nd_region region3: ARS: range 1 short complete
nd_region region4: ARS: range 2 ARS start (0)
nd_region region4: ARS: range 2 short complete
...i.e. completions without any indications that the scrub was started.
This state of affairs was hard to see in the code due to the
proliferation of state bits and mistakenly trying to track done state
per-range when the completion is a global property of the bus.
So, kill the four ARS state bits (ARS_REQ, ARS_REQ_REDO, ARS_DONE, and
ARS_SHORT), and replace them with just 2 request flags ARS_REQ_SHORT and
ARS_REQ_LONG. The implementation will still complete and reap the
results of BIOS initiated ARS, but it will not attempt to use that
information to affect the completion status of scrubbing the ranges from
a Linux perspective.
Instead, try to synchronously run a short ARS per range at init time and
schedule a long scrub in the background. If ARS is busy with an ARS
request, schedule both a short and a long scrub for when ARS returns to
idle. This logic also satisfies the intent of what ARS_REQ_REDO was
trying to achieve. The new rule is that the REQ flag stays set until the
next successful ars_start() for that range.
With the new policy that the REQ flags are not cleared until the next
start, the implementation no longer loses requests as can be seen from
the following log:
nd_region region3: ARS: range 1 ARS start short (0)
nd_region region9: ARS: range 1 ARS start short (0)
nd_region region3: ARS: range 1 complete
nd_region region4: ARS: range 2 ARS start short (0)
nd_region region9: ARS: range 1 complete
nd_region region9: ARS: range 1 ARS start long (0)
nd_region region4: ARS: range 2 complete
nd_region region3: ARS: range 1 ARS start long (0)
nd_region region9: ARS: range 1 complete
nd_region region3: ARS: range 1 complete
nd_region region4: ARS: range 2 ARS start long (0)
nd_region region4: ARS: range 2 complete
...note that the nfit_test emulated driver provides 2 buses, that is why
some of the range indices are duplicated. Notice that each range
now successfully completes a short and long scrub.
Cc: <stable@vger.kernel.org>
Fixes: 14c73f997a5e ("nfit, address-range-scrub: introduce nfit_spa->ars_state")
Fixes: cc3d3458d46f ("acpi/nfit: queue issuing of ars when an uc error...")
Reported-by: Jacek Zloch <jacek.zloch@intel.com>
Reported-by: Krzysztof Rusocki <krzysztof.rusocki@intel.com>
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
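The request policy above reduces to a small state machine: either REQ flag may
be set at any time, short requests win when both are pending, and a flag is
cleared only by a successful start. A minimal sketch of that policy follows,
using simplified stand-in types rather than the kernel's nfit_spa/ars_state
machinery:
/* Minimal sketch of the two-flag ARS request policy described above;
 * simplified stand-ins, not the kernel's nfit_spa/ars_state types. */
#include <stdbool.h>

enum { REQ_SHORT = 1u << 0, REQ_LONG = 1u << 1 };

struct range {
	unsigned int req_flags;		/* pending scrub requests */
};

/* Any requester may set a flag; it persists until a successful start. */
static void request_scrub(struct range *r, unsigned int flag)
{
	r->req_flags |= flag;
}

/* Prefer short requests; clear the flag only when the start succeeds,
 * so a failed or busy start leaves the request pending for later. */
static bool try_start_scrub(struct range *r,
		int (*ars_start)(struct range *, unsigned int))
{
	unsigned int flag;

	if (r->req_flags & REQ_SHORT)
		flag = REQ_SHORT;
	else if (r->req_flags & REQ_LONG)
		flag = REQ_LONG;
	else
		return false;		/* nothing requested */

	if (ars_start(r, flag) != 0)
		return false;		/* request survives the failure */

	r->req_flags &= ~flag;		/* consumed by a successful start */
	return true;
}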
2018-10-14 11:32:17 +08:00
|
|
|
|
|
|
|
/* prefer short ARS requests first */
|
|
|
|
if (test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state))
|
|
|
|
req_type = ARS_REQ_SHORT;
|
|
|
|
else if (test_bit(ARS_REQ_LONG, &nfit_spa->ars_state))
|
|
|
|
req_type = ARS_REQ_LONG;
|
|
|
|
else
|
|
|
|
continue;
|
|
|
|
rc = ars_start(acpi_desc, nfit_spa, req_type);
|
|
|
|
|
|
|
|
dev = nd_region_dev(nfit_spa->nd_region);
|
|
|
|
dev_dbg(dev, "ARS: range %d ARS start %s (%d)\n",
|
|
|
|
nfit_spa->spa->range_index,
|
|
|
|
req_type == ARS_REQ_SHORT ? "short" : "long",
|
|
|
|
rc);
|
|
|
|
/*
|
|
|
|
* Hmm, we raced someone else starting ARS? Try again in
|
|
|
|
* a bit.
|
|
|
|
*/
|
|
|
|
if (rc == -EBUSY)
|
|
|
|
return 1;
|
|
|
|
if (rc == 0) {
|
|
|
|
dev_WARN_ONCE(dev, acpi_desc->scrub_spa,
|
|
|
|
"scrub start while range %d active\n",
|
|
|
|
acpi_desc->scrub_spa->spa->range_index);
|
|
|
|
clear_bit(req_type, &nfit_spa->ars_state);
|
|
|
|
acpi_desc->scrub_spa = nfit_spa;
|
|
|
|
/*
|
|
|
|
* Consider this spa last for future scrub
|
|
|
|
* requests
|
|
|
|
*/
|
|
|
|
list_move_tail(&nfit_spa->list, &acpi_desc->spas);
|
|
|
|
return 1;
|
2016-07-24 12:51:42 +08:00
|
|
|
}
|
2018-10-14 11:32:17 +08:00
|
|
|
|
|
|
|
dev_err(dev, "ARS: range %d ARS failed (%d)\n",
|
|
|
|
nfit_spa->spa->range_index, rc);
|
|
|
|
set_bit(ARS_FAILED, &nfit_spa->ars_state);
|
2016-02-18 05:01:23 +08:00
|
|
|
}
|
2018-04-06 07:18:55 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2016-02-18 05:01:23 +08:00
|
|
|
|
2018-07-06 05:58:49 +08:00
|
|
|
static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
|
|
|
|
{
|
|
|
|
lockdep_assert_held(&acpi_desc->init_mutex);
|
|
|
|
|
2019-02-14 01:57:22 +08:00
|
|
|
set_bit(ARS_BUSY, &acpi_desc->scrub_flags);
|
2018-07-06 05:58:49 +08:00
|
|
|
/* note this should only be set from within the workqueue */
|
|
|
|
if (tmo)
|
|
|
|
acpi_desc->scrub_tmo = tmo;
|
|
|
|
queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void sched_ars(struct acpi_nfit_desc *acpi_desc)
|
|
|
|
{
|
|
|
|
__sched_ars(acpi_desc, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
|
|
|
|
{
|
|
|
|
lockdep_assert_held(&acpi_desc->init_mutex);
|
|
|
|
|
2019-02-14 01:57:22 +08:00
|
|
|
clear_bit(ARS_BUSY, &acpi_desc->scrub_flags);
|
2018-07-06 05:58:49 +08:00
|
|
|
acpi_desc->scrub_count++;
|
|
|
|
if (acpi_desc->scrub_count_state)
|
|
|
|
sysfs_notify_dirent(acpi_desc->scrub_count_state);
|
|
|
|
}
|
|
|
|
|
2018-04-06 07:18:55 +08:00
|
|
|
static void acpi_nfit_scrub(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct acpi_nfit_desc *acpi_desc;
|
|
|
|
unsigned int tmo;
|
|
|
|
int query_rc;
|
|
|
|
|
|
|
|
acpi_desc = container_of(work, typeof(*acpi_desc), dwork.work);
|
|
|
|
mutex_lock(&acpi_desc->init_mutex);
|
|
|
|
query_rc = acpi_nfit_query_poison(acpi_desc);
|
|
|
|
tmo = __acpi_nfit_scrub(acpi_desc, query_rc);
|
2018-07-06 05:58:49 +08:00
|
|
|
if (tmo)
|
|
|
|
__sched_ars(acpi_desc, tmo);
|
|
|
|
else
|
|
|
|
notify_ars_done(acpi_desc);
|
2018-04-06 07:18:55 +08:00
|
|
|
memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
|
2019-02-14 01:04:07 +08:00
|
|
|
clear_bit(ARS_POLL, &acpi_desc->scrub_flags);
|
2016-02-18 05:01:23 +08:00
|
|
|
mutex_unlock(&acpi_desc->init_mutex);
|
|
|
|
}
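The scrub handler above follows a common delayed-work shape: do a bounded
amount of polling under a lock, then either re-queue itself with a new timeout
or signal completion. A generic sketch of that shape, with illustrative names
and a stubbed-out timeout source rather than the nfit state:
/* Generic shape of the self-rescheduling scrub work above: poll once,
 * then either re-queue with a new timeout or signal completion.
 * Names and the timeout source are illustrative stand-ins. */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static unsigned int poll_device(void)
{
	return 0;	/* stand-in: 0 means done, else seconds until re-poll */
}

static void poll_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	unsigned int tmo = poll_device();

	if (tmo)
		schedule_delayed_work(dwork, tmo * HZ);	/* keep polling */
	/* else: a completion counter / sysfs notification would go here */
}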
|
|
|
|
|
2018-04-05 16:25:02 +08:00
|
|
|
static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc,
|
|
|
|
struct nfit_spa *nfit_spa)
|
|
|
|
{
|
|
|
|
int type = nfit_spa_type(nfit_spa->spa);
|
|
|
|
struct nd_cmd_ars_cap ars_cap;
|
|
|
|
int rc;
|
|
|
|
|
2018-10-14 11:32:17 +08:00
|
|
|
set_bit(ARS_FAILED, &nfit_spa->ars_state);
|
2018-04-05 16:25:02 +08:00
|
|
|
memset(&ars_cap, 0, sizeof(ars_cap));
|
|
|
|
rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
|
|
|
|
if (rc < 0)
|
|
|
|
return;
|
|
|
|
/* check that the supported scrub types match the spa type */
|
|
|
|
if (type == NFIT_SPA_VOLATILE && ((ars_cap.status >> 16)
|
|
|
|
& ND_ARS_VOLATILE) == 0)
|
|
|
|
return;
|
|
|
|
if (type == NFIT_SPA_PM && ((ars_cap.status >> 16)
|
|
|
|
& ND_ARS_PERSISTENT) == 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
nfit_spa->max_ars = ars_cap.max_ars_out;
|
|
|
|
nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
|
|
|
|
acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars);
|
2018-04-06 07:18:55 +08:00
|
|
|
clear_bit(ARS_FAILED, &nfit_spa->ars_state);
|
2018-04-05 16:25:02 +08:00
|
|
|
}
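The (ars_cap.status >> 16) tests above rely on the ARS Capabilities output
packing the supported scrub-type flags into the upper 16 bits of the status
word, with the status code proper in the lower 16 bits. A self-contained
sketch of that unpacking; the flag values mirror the ND_ARS_* definitions but
are restated here as stand-ins:
/* Sketch of unpacking the ARS Capabilities status word tested above:
 * low 16 bits carry the status code, high 16 bits carry the
 * supported-scrub-type flags. Flag values are illustrative stand-ins. */
#define ARS_CAP_VOLATILE	(1u << 0)	/* stands in for ND_ARS_VOLATILE */
#define ARS_CAP_PERSISTENT	(1u << 1)	/* stands in for ND_ARS_PERSISTENT */

static unsigned int ars_cap_status_code(unsigned int status)
{
	return status & 0xffff;			/* command status proper */
}

static int ars_cap_supports(unsigned int status, unsigned int flag)
{
	return ((status >> 16) & flag) != 0;	/* capability flags */
}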
|
|
|
|
|
2016-02-18 05:01:23 +08:00
|
|
|
static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
|
|
|
|
{
|
|
|
|
struct nfit_spa *nfit_spa;
|
2020-08-01 09:38:26 +08:00
|
|
|
int rc, do_sched_ars = 0;
|
2016-02-18 05:01:23 +08:00
|
|
|
|
2019-02-14 01:28:40 +08:00
|
|
|
set_bit(ARS_VALID, &acpi_desc->scrub_flags);
|
2018-04-03 07:49:30 +08:00
|
|
|
list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
|
2018-04-06 07:18:55 +08:00
|
|
|
switch (nfit_spa_type(nfit_spa->spa)) {
|
|
|
|
case NFIT_SPA_VOLATILE:
|
|
|
|
case NFIT_SPA_PM:
|
2018-04-05 16:25:02 +08:00
|
|
|
acpi_nfit_init_ars(acpi_desc, nfit_spa);
|
2018-04-06 07:18:55 +08:00
|
|
|
break;
|
2018-04-05 16:25:02 +08:00
|
|
|
}
|
2018-04-03 07:49:30 +08:00
|
|
|
}
|
2016-02-18 05:01:23 +08:00
|
|
|
|
2020-08-01 09:38:26 +08:00
|
|
|
list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
|
2018-04-06 07:18:55 +08:00
|
|
|
switch (nfit_spa_type(nfit_spa->spa)) {
|
|
|
|
case NFIT_SPA_VOLATILE:
|
|
|
|
case NFIT_SPA_PM:
|
|
|
|
/* register regions and kick off initial ARS run */
|
2018-10-14 11:32:17 +08:00
|
|
|
rc = ars_register(acpi_desc, nfit_spa);
|
2018-04-06 07:18:55 +08:00
|
|
|
if (rc)
|
|
|
|
return rc;
|
2020-08-01 09:38:26 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Kick off background ARS if at least one
|
|
|
|
* region successfully registered ARS
|
|
|
|
*/
|
|
|
|
if (!test_bit(ARS_FAILED, &nfit_spa->ars_state))
|
|
|
|
do_sched_ars++;
|
2018-04-06 07:18:55 +08:00
|
|
|
break;
|
|
|
|
case NFIT_SPA_BDW:
|
|
|
|
/* nothing to register */
|
|
|
|
break;
|
|
|
|
case NFIT_SPA_DCR:
|
|
|
|
case NFIT_SPA_VDISK:
|
|
|
|
case NFIT_SPA_VCD:
|
|
|
|
case NFIT_SPA_PDISK:
|
|
|
|
case NFIT_SPA_PCD:
|
|
|
|
/* register known regions that don't support ARS */
|
2016-02-18 05:01:23 +08:00
|
|
|
rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
2018-04-06 07:18:55 +08:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
/* don't register unknown regions */
|
|
|
|
break;
|
2016-02-18 05:01:23 +08:00
|
|
|
}
|
2020-08-01 09:38:26 +08:00
|
|
|
}
|
2016-02-18 05:01:23 +08:00
|
|
|
|
2020-08-01 09:38:26 +08:00
|
|
|
if (do_sched_ars)
|
|
|
|
sched_ars(acpi_desc);
|
libnvdimm, nfit: regions (block-data-window, persistent memory, volatile memory)
A "region" device represents the maximum capacity of a BLK range (mmio
block-data-window(s)), or a PMEM range (DAX-capable persistent memory or
volatile memory), without regard for aliasing. Aliasing, in the
dimm-local address space (DPA), is resolved by metadata on a dimm to
designate which exclusive interface will access the aliased DPA ranges.
Support for the per-dimm metadata/label arrives in a subsequent
patch.
The name format of "region" devices is "regionN" where, like dimms, N is
a global ida index assigned at discovery time. This id is not reliable
across reboots nor in the presence of hotplug. Look to attributes of
the region or static id-data of the sub-namespace to generate a
persistent name. However, if the platform configuration does not change
it is reasonable to expect the same region id to be assigned at the next
boot.
"region"s have 2 generic attributes "size", and "mapping"s where:
- size: the BLK accessible capacity or the span of the
system physical address range in the case of PMEM.
- mappingN: a tuple describing a dimm's contribution to the region's
capacity in the format (<nmemX>,<dpa>,<size>). For a PMEM-region
there will be at least one mapping per dimm in the interleave set. For
a BLK-region there is only "mapping0" listing the starting DPA of the
BLK-region and the available DPA capacity of that space (matches "size"
above).
The max number of mappings per "region" is hard coded per the
constraints of sysfs attribute groups. That said, the number of mappings
per region should never exceed the maximum number of possible dimms in
the system. If the current number turns out to not be enough then the
"mappings" attribute clarifies how many there are supposed to be. "32
should be enough for anybody...".
Cc: Neil Brown <neilb@suse.de>
Cc: <linux-acpi@vger.kernel.org>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Robert Moore <robert.moore@intel.com>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Toshi Kani <toshi.kani@hp.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
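Given the (<nmemX>,<dpa>,<size>) tuple notation above, a hypothetical
userspace parser could look like the following; the exact rendering of the
real sysfs attribute (decimal vs. hex, any trailing fields) is an assumption
here, not a statement of the actual format:
/* Hypothetical parser for the mappingN tuple notation quoted above,
 * e.g. "nmem0,0x10000000,0x20000000". The hex rendering and field
 * count are assumptions about the example, not the real sysfs format. */
#include <stdio.h>

struct mapping {
	char dimm[16];			/* "nmemX" device name */
	unsigned long long dpa;		/* start in dimm-local address space */
	unsigned long long size;	/* capacity contributed to the region */
};

static int parse_mapping(const char *s, struct mapping *m)
{
	if (sscanf(s, "%15[^,],%llx,%llx", m->dimm, &m->dpa, &m->size) != 3)
		return -1;
	return 0;
}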
2015-06-10 08:13:14 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-10-28 06:58:27 +08:00
|
|
|
static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
|
|
|
|
struct nfit_table_prev *prev)
|
|
|
|
{
|
|
|
|
struct device *dev = acpi_desc->dev;
|
|
|
|
|
|
|
|
if (!list_empty(&prev->spas) ||
|
|
|
|
!list_empty(&prev->memdevs) ||
|
|
|
|
!list_empty(&prev->dcrs) ||
|
|
|
|
!list_empty(&prev->bdws) ||
|
|
|
|
!list_empty(&prev->idts) ||
|
|
|
|
!list_empty(&prev->flushes)) {
|
|
|
|
dev_err(dev, "new nfit deletes entries (unsupported)\n");
|
|
|
|
return -ENXIO;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-07-24 12:51:42 +08:00
|
|
|
static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
|
|
|
|
{
|
|
|
|
struct device *dev = acpi_desc->dev;
|
|
|
|
struct kernfs_node *nfit;
|
|
|
|
struct device *bus_dev;
|
|
|
|
|
|
|
|
if (!ars_supported(acpi_desc->nvdimm_bus))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
|
|
|
|
nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
|
|
|
|
if (!nfit) {
|
|
|
|
dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
|
|
|
|
sysfs_put(nfit);
|
|
|
|
if (!acpi_desc->scrub_count_state) {
|
|
|
|
dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-04-19 00:56:31 +08:00
|
|
|
static void acpi_nfit_unregister(void *data)
|
2016-07-22 09:05:36 +08:00
|
|
|
{
|
|
|
|
struct acpi_nfit_desc *acpi_desc = data;
|
|
|
|
|
|
|
|
nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
|
|
|
|
}
|
|
|
|
|
2016-07-15 07:19:55 +08:00
|
|
|
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
|
2015-05-20 10:54:31 +08:00
|
|
|
{
|
|
|
|
struct device *dev = acpi_desc->dev;
|
2015-10-28 06:58:27 +08:00
|
|
|
struct nfit_table_prev prev;
|
2015-05-20 10:54:31 +08:00
|
|
|
const void *end;
|
2015-06-10 08:13:14 +08:00
|
|
|
int rc;
|
2015-05-20 10:54:31 +08:00
|
|
|
|
2016-07-22 09:05:36 +08:00
|
|
|
if (!acpi_desc->nvdimm_bus) {
|
2016-07-24 12:51:42 +08:00
|
|
|
acpi_nfit_init_dsms(acpi_desc);
|
|
|
|
|
2016-07-22 09:05:36 +08:00
|
|
|
acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
|
|
|
|
&acpi_desc->nd_desc);
|
|
|
|
if (!acpi_desc->nvdimm_bus)
|
|
|
|
return -ENOMEM;
|
2016-07-24 12:51:42 +08:00
|
|
|
|
2017-04-19 00:56:31 +08:00
|
|
|
rc = devm_add_action_or_reset(dev, acpi_nfit_unregister,
|
2016-07-22 09:05:36 +08:00
|
|
|
acpi_desc);
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
2016-07-24 12:51:42 +08:00
|
|
|
|
|
|
|
rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
2016-07-24 12:51:21 +08:00
|
|
|
|
|
|
|
/* register this acpi_desc for mce notifications */
|
|
|
|
mutex_lock(&acpi_desc_lock);
|
|
|
|
list_add_tail(&acpi_desc->list, &acpi_descs);
|
|
|
|
mutex_unlock(&acpi_desc_lock);
|
2016-07-22 09:05:36 +08:00
|
|
|
}
|
|
|
|
|
2015-10-28 06:58:27 +08:00
|
|
|
mutex_lock(&acpi_desc->init_mutex);
|
|
|
|
|
|
|
|
INIT_LIST_HEAD(&prev.spas);
|
|
|
|
INIT_LIST_HEAD(&prev.memdevs);
|
|
|
|
INIT_LIST_HEAD(&prev.dcrs);
|
|
|
|
INIT_LIST_HEAD(&prev.bdws);
|
|
|
|
INIT_LIST_HEAD(&prev.idts);
|
|
|
|
INIT_LIST_HEAD(&prev.flushes);
|
|
|
|
|
|
|
|
list_cut_position(&prev.spas, &acpi_desc->spas,
|
|
|
|
acpi_desc->spas.prev);
|
|
|
|
list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
|
|
|
|
acpi_desc->memdevs.prev);
|
|
|
|
list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
|
|
|
|
acpi_desc->dcrs.prev);
|
|
|
|
list_cut_position(&prev.bdws, &acpi_desc->bdws,
|
|
|
|
acpi_desc->bdws.prev);
|
|
|
|
list_cut_position(&prev.idts, &acpi_desc->idts,
|
|
|
|
acpi_desc->idts.prev);
|
|
|
|
list_cut_position(&prev.flushes, &acpi_desc->flushes,
|
|
|
|
acpi_desc->flushes.prev);
|
2015-05-20 10:54:31 +08:00
|
|
|
|
|
|
|
end = data + sz;
|
|
|
|
while (!IS_ERR_OR_NULL(data))
|
2015-10-28 06:58:27 +08:00
|
|
|
data = add_table(acpi_desc, &prev, data, end);
|
2015-05-20 10:54:31 +08:00
|
|
|
|
|
|
|
if (IS_ERR(data)) {
|
2018-03-02 20:20:49 +08:00
|
|
|
dev_dbg(dev, "nfit table parsing error: %ld\n", PTR_ERR(data));
|
2015-10-28 06:58:27 +08:00
|
|
|
rc = PTR_ERR(data);
|
|
|
|
goto out_unlock;
|
2015-05-20 10:54:31 +08:00
|
|
|
}
|
|
|
|
|
2015-10-28 06:58:27 +08:00
|
|
|
rc = acpi_nfit_check_deletions(acpi_desc, &prev);
|
|
|
|
if (rc)
|
|
|
|
goto out_unlock;
|
|
|
|
|
2016-06-11 09:20:53 +08:00
|
|
|
rc = nfit_mem_init(acpi_desc);
|
|
|
|
if (rc)
|
2015-10-28 06:58:27 +08:00
|
|
|
goto out_unlock;
|
2015-06-09 02:27:06 +08:00
|
|
|
|
2015-06-10 08:13:14 +08:00
|
|
|
rc = acpi_nfit_register_dimms(acpi_desc);
|
|
|
|
if (rc)
|
2015-10-28 06:58:27 +08:00
|
|
|
goto out_unlock;
|
|
|
|
|
|
|
|
rc = acpi_nfit_register_regions(acpi_desc);
|
2015-06-10 08:13:14 +08:00
|
|
|
|
2015-10-28 06:58:27 +08:00
|
|
|
out_unlock:
|
|
|
|
mutex_unlock(&acpi_desc->init_mutex);
|
|
|
|
return rc;
|
2015-05-20 10:54:31 +08:00
|
|
|
}
|
2015-06-18 05:23:32 +08:00
|
|
|
EXPORT_SYMBOL_GPL(acpi_nfit_init);
|
2015-05-20 10:54:31 +08:00
|
|
|
|
2016-02-20 04:16:34 +08:00
|
|
|
static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
|
|
|
|
{
|
2019-01-05 16:08:38 +08:00
|
|
|
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
|
2016-02-20 04:16:34 +08:00
|
|
|
struct device *dev = acpi_desc->dev;
|
|
|
|
|
2018-04-06 07:18:55 +08:00
|
|
|
/* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
|
2019-07-18 09:08:26 +08:00
|
|
|
nfit_device_lock(dev);
|
|
|
|
nfit_device_unlock(dev);
|
2016-02-20 04:16:34 +08:00
|
|
|
|
2018-04-06 07:18:55 +08:00
|
|
|
/* Bounce the init_mutex to complete initial registration */
|
2017-04-14 13:48:46 +08:00
|
|
|
mutex_lock(&acpi_desc->init_mutex);
|
2017-04-19 00:56:31 +08:00
|
|
|
mutex_unlock(&acpi_desc->init_mutex);
|
2017-02-03 02:31:00 +08:00
|
|
|
|
2018-04-06 07:18:55 +08:00
|
|
|
return 0;
|
2016-02-20 04:16:34 +08:00
|
|
|
}
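The two lock/unlock pairs above do no work under the lock on purpose:
acquiring and immediately releasing a lock is a barrier that waits out any
critical section already in flight. The idiom in isolation, with an
illustrative wrapper name:
/* The lock-bounce barrier used above, in isolation: the acquire
 * cannot complete until any holder that entered the critical section
 * before us has dropped the lock; we add no work of our own. */
#include <linux/mutex.h>

static void wait_for_in_flight(struct mutex *lock)
{
	mutex_lock(lock);
	mutex_unlock(lock);
}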
|
|
|
|
|
2018-12-05 02:31:11 +08:00
|
|
|
static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
|
2016-02-23 13:50:31 +08:00
|
|
|
struct nvdimm *nvdimm, unsigned int cmd)
|
|
|
|
{
|
2019-01-05 16:08:38 +08:00
|
|
|
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
|
2016-02-23 13:50:31 +08:00
|
|
|
|
|
|
|
if (nvdimm)
|
|
|
|
return 0;
|
|
|
|
if (cmd != ND_CMD_ARS_START)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The kernel and userspace may race to initiate a scrub, but
|
|
|
|
* the scrub thread is prepared to lose that initial race. It
|
2018-10-16 03:57:31 +08:00
|
|
|
* just needs a guarantee that any ARS it initiates is not
|
|
|
|
* interrupted by any intervening start requests from userspace.
|
2016-02-23 13:50:31 +08:00
|
|
|
*/
|
2018-11-04 08:53:09 +08:00
|
|
|
if (work_busy(&acpi_desc->dwork.work))
|
|
|
|
return -EBUSY;
|
2016-02-23 13:50:31 +08:00
|
|
|
|
2018-11-04 08:53:09 +08:00
|
|
|
return 0;
|
2016-02-23 13:50:31 +08:00
|
|
|
}
|
|
|
|
|
2020-07-21 06:07:40 +08:00
|
|
|
/*
|
|
|
|
* Prevent security and firmware activate commands from being issued via
|
|
|
|
* ioctl.
|
|
|
|
*/
|
2018-12-05 02:31:11 +08:00
|
|
|
static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
|
|
|
|
struct nvdimm *nvdimm, unsigned int cmd, void *buf)
|
|
|
|
{
|
|
|
|
struct nd_cmd_pkg *call_pkg = buf;
|
|
|
|
unsigned int func;
|
|
|
|
|
|
|
|
if (nvdimm && cmd == ND_CMD_CALL &&
|
|
|
|
call_pkg->nd_family == NVDIMM_FAMILY_INTEL) {
|
|
|
|
func = call_pkg->nd_command;
|
2020-02-26 00:20:06 +08:00
|
|
|
if (func > NVDIMM_CMD_MAX ||
|
2020-07-21 06:07:40 +08:00
|
|
|
(1 << func) & NVDIMM_INTEL_DENY_CMDMASK)
|
2018-12-05 02:31:11 +08:00
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
|
2020-07-21 06:07:40 +08:00
|
|
|
/* block all non-nfit bus commands */
|
|
|
|
if (!nvdimm && cmd == ND_CMD_CALL &&
|
|
|
|
call_pkg->nd_family != NVDIMM_BUS_FAMILY_NFIT)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
2018-12-05 02:31:11 +08:00
|
|
|
return __acpi_nfit_clear_to_send(nd_desc, nvdimm, cmd);
|
|
|
|
}
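The filter above is the standard deny-bitmask pattern: the function number
indexes a bit in a compile-time mask, with a range check so the shift stays
defined. A self-contained sketch with hypothetical command numbers and mask,
not the kernel's NVDIMM_INTEL_DENY_CMDMASK values:
/* Deny-bitmask pattern used above, with hypothetical command numbers
 * and mask values chosen purely for illustration. */
enum { CMD_GET_STATE = 1, CMD_SET_PASSPHRASE = 2, CMD_SECURE_ERASE = 3 };
#define CMD_MAX		CMD_SECURE_ERASE
#define DENY_CMDMASK	((1u << CMD_SET_PASSPHRASE) | (1u << CMD_SECURE_ERASE))

static int cmd_denied(unsigned int func)
{
	if (func > CMD_MAX)
		return 1;	/* out-of-range functions are refused too */
	return ((1u << func) & DENY_CMDMASK) != 0;
}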
|
|
|
|
|
2018-10-14 11:32:17 +08:00
|
|
|
int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
|
|
|
|
enum nfit_ars_state req_type)
|
2016-07-24 12:51:42 +08:00
|
|
|
{
|
|
|
|
struct device *dev = acpi_desc->dev;
|
2018-04-06 07:18:55 +08:00
|
|
|
int scheduled = 0, busy = 0;
|
2016-07-24 12:51:42 +08:00
|
|
|
struct nfit_spa *nfit_spa;
|
|
|
|
|
2017-04-19 00:56:31 +08:00
|
|
|
mutex_lock(&acpi_desc->init_mutex);
|
2019-02-14 01:57:22 +08:00
|
|
|
if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) {
|
2017-04-19 00:56:31 +08:00
|
|
|
mutex_unlock(&acpi_desc->init_mutex);
|
2016-07-24 12:51:42 +08:00
|
|
|
return 0;
|
2017-04-19 00:56:31 +08:00
|
|
|
}
|
2016-07-24 12:51:42 +08:00
|
|
|
|
|
|
|
list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
|
2018-04-06 07:18:55 +08:00
|
|
|
int type = nfit_spa_type(nfit_spa->spa);
|
2016-07-24 12:51:42 +08:00
|
|
|
|
2018-04-06 07:18:55 +08:00
|
|
|
if (type != NFIT_SPA_PM && type != NFIT_SPA_VOLATILE)
|
|
|
|
continue;
|
|
|
|
if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
|
2016-07-24 12:51:42 +08:00
|
|
|
continue;
|
|
|
|
|
2018-10-14 11:32:17 +08:00
|
|
|
if (test_and_set_bit(req_type, &nfit_spa->ars_state))
|
2018-04-06 07:18:55 +08:00
|
|
|
busy++;
|
2018-10-14 11:32:17 +08:00
|
|
|
else
|
2018-04-06 07:18:55 +08:00
|
|
|
scheduled++;
|
|
|
|
}
|
|
|
|
if (scheduled) {
|
2018-07-06 05:58:49 +08:00
|
|
|
sched_ars(acpi_desc);
|
2018-04-06 07:18:55 +08:00
|
|
|
dev_dbg(dev, "ars_scan triggered\n");
|
2016-07-24 12:51:42 +08:00
|
|
|
}
|
|
|
|
mutex_unlock(&acpi_desc->init_mutex);
|
|
|
|
|
2018-04-06 07:18:55 +08:00
|
|
|
if (scheduled)
|
|
|
|
return 0;
|
|
|
|
if (busy)
|
|
|
|
return -EBUSY;
|
|
|
|
return -ENOTTY;
|
2016-07-24 12:51:42 +08:00
|
|
|
}
|
|
|
|
|
2016-02-20 04:29:32 +08:00
|
|
|
void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
|
2015-05-20 10:54:31 +08:00
|
|
|
{
|
|
|
|
struct nvdimm_bus_descriptor *nd_desc;
|
|
|
|
|
|
|
|
dev_set_drvdata(dev, acpi_desc);
|
|
|
|
acpi_desc->dev = dev;
|
2015-06-18 05:23:32 +08:00
|
|
|
acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
|
2015-05-20 10:54:31 +08:00
|
|
|
nd_desc = &acpi_desc->nd_desc;
|
|
|
|
nd_desc->provider_name = "ACPI.NFIT";
|
2016-07-22 11:03:19 +08:00
|
|
|
nd_desc->module = THIS_MODULE;
|
2015-05-20 10:54:31 +08:00
|
|
|
nd_desc->ndctl = acpi_nfit_ctl;
|
2016-02-20 04:16:34 +08:00
|
|
|
nd_desc->flush_probe = acpi_nfit_flush_probe;
|
2016-02-23 13:50:31 +08:00
|
|
|
nd_desc->clear_to_send = acpi_nfit_clear_to_send;
|
2015-04-27 07:26:48 +08:00
|
|
|
nd_desc->attr_groups = acpi_nfit_attribute_groups;
|
2015-05-20 10:54:31 +08:00
|
|
|
|
2015-10-28 06:58:27 +08:00
|
|
|
INIT_LIST_HEAD(&acpi_desc->spas);
|
|
|
|
INIT_LIST_HEAD(&acpi_desc->dcrs);
|
|
|
|
INIT_LIST_HEAD(&acpi_desc->bdws);
|
|
|
|
INIT_LIST_HEAD(&acpi_desc->idts);
|
|
|
|
INIT_LIST_HEAD(&acpi_desc->flushes);
|
|
|
|
INIT_LIST_HEAD(&acpi_desc->memdevs);
|
|
|
|
INIT_LIST_HEAD(&acpi_desc->dimms);
|
2016-07-24 12:51:21 +08:00
|
|
|
INIT_LIST_HEAD(&acpi_desc->list);
|
2015-10-28 06:58:27 +08:00
|
|
|
mutex_init(&acpi_desc->init_mutex);
|
2018-04-06 07:18:55 +08:00
|
|
|
acpi_desc->scrub_tmo = 1;
|
|
|
|
INIT_DELAYED_WORK(&acpi_desc->dwork, acpi_nfit_scrub);
|
2015-10-28 06:58:27 +08:00
|
|
|
}
|
2016-02-20 04:29:32 +08:00
|
|
|
EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
|
2015-10-28 06:58:27 +08:00
|
|
|
|
2017-04-04 04:52:14 +08:00
|
|
|
static void acpi_nfit_put_table(void *table)
|
|
|
|
{
|
|
|
|
acpi_put_table(table);
|
|
|
|
}
|
|
|
|
|
2017-04-19 00:56:31 +08:00
|
|
|
void acpi_nfit_shutdown(void *data)
|
|
|
|
{
|
|
|
|
struct acpi_nfit_desc *acpi_desc = data;
|
|
|
|
struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Destruct under acpi_desc_lock so that nfit_handle_mce does not
|
|
|
|
* race teardown
|
|
|
|
*/
|
|
|
|
mutex_lock(&acpi_desc_lock);
|
|
|
|
list_del(&acpi_desc->list);
|
|
|
|
mutex_unlock(&acpi_desc_lock);
|
|
|
|
|
|
|
|
mutex_lock(&acpi_desc->init_mutex);
|
2019-02-14 01:57:22 +08:00
|
|
|
set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
|
2018-04-06 07:18:55 +08:00
|
|
|
cancel_delayed_work_sync(&acpi_desc->dwork);
|
2017-04-19 00:56:31 +08:00
|
|
|
mutex_unlock(&acpi_desc->init_mutex);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Bounce the nvdimm bus lock to make sure any in-flight
|
|
|
|
* acpi_nfit_ars_rescan() submissions have had a chance to
|
|
|
|
* either submit or see ->cancel set.
|
|
|
|
*/
|
2019-07-18 09:08:26 +08:00
|
|
|
nfit_device_lock(bus_dev);
|
|
|
|
nfit_device_unlock(bus_dev);
|
2017-04-19 00:56:31 +08:00
|
|
|
|
|
|
|
flush_workqueue(nfit_wq);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(acpi_nfit_shutdown);
|
|
|
|
|
2015-10-28 06:58:27 +08:00
|
|
|
static int acpi_nfit_add(struct acpi_device *adev)
|
|
|
|
{
|
|
|
|
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
|
|
|
|
struct acpi_nfit_desc *acpi_desc;
|
|
|
|
struct device *dev = &adev->dev;
|
|
|
|
struct acpi_table_header *tbl;
|
|
|
|
acpi_status status = AE_OK;
|
|
|
|
acpi_size sz;
|
2016-07-15 08:22:48 +08:00
|
|
|
int rc = 0;
|
2015-10-28 06:58:27 +08:00
|
|
|
|
2016-12-14 15:04:39 +08:00
|
|
|
status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
|
2015-10-28 06:58:27 +08:00
|
|
|
if (ACPI_FAILURE(status)) {
|
2018-08-07 14:15:31 +08:00
|
|
|
/* The NVDIMM root device allows the OS to trigger enumeration of
|
|
|
|
* NVDIMMs through NFIT at boot time and re-enumeration at
|
|
|
|
* root level via the _FIT method during runtime.
|
|
|
|
* It is OK to return 0 here; an NVDIMM may be hotplugged
|
|
|
|
* later, and evaluating its _FIT method then returns data
|
|
|
|
* as a series of NFIT structures.
|
|
|
|
*/
|
2015-10-28 06:58:27 +08:00
|
|
|
dev_dbg(dev, "failed to find NFIT at startup\n");
|
|
|
|
return 0;
|
|
|
|
}
|
2017-04-04 04:52:14 +08:00
|
|
|
|
|
|
|
rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl);
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
2016-12-14 15:04:39 +08:00
|
|
|
sz = tbl->length;
|
2015-10-28 06:58:27 +08:00
|
|
|
|
2016-02-20 04:29:32 +08:00
|
|
|
acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
|
|
|
|
if (!acpi_desc)
|
|
|
|
return -ENOMEM;
|
|
|
|
acpi_nfit_desc_init(acpi_desc, &adev->dev);
|
2015-10-28 06:58:27 +08:00
|
|
|
|
2016-07-15 07:19:55 +08:00
|
|
|
/* Save the acpi header for exporting the revision via sysfs */
|
2015-11-21 08:05:49 +08:00
|
|
|
acpi_desc->acpi_header = *tbl;
|
2015-10-28 06:58:27 +08:00
|
|
|
|
|
|
|
/* Evaluate _FIT and override with that if present */
|
|
|
|
status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
|
|
|
|
if (ACPI_SUCCESS(status) && buf.length > 0) {
|
2016-07-15 07:19:55 +08:00
|
|
|
union acpi_object *obj = buf.pointer;
|
|
|
|
|
|
|
|
if (obj->type == ACPI_TYPE_BUFFER)
|
|
|
|
rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
|
|
|
|
obj->buffer.length);
|
|
|
|
else
|
2018-03-02 20:20:49 +08:00
|
|
|
dev_dbg(dev, "invalid type %d, ignoring _FIT\n",
|
|
|
|
(int) obj->type);
|
2016-07-15 08:22:48 +08:00
|
|
|
kfree(buf.pointer);
|
|
|
|
} else
|
2016-07-15 07:19:55 +08:00
|
|
|
/* skip over the lead-in header table */
|
|
|
|
rc = acpi_nfit_init(acpi_desc, (void *) tbl
|
|
|
|
+ sizeof(struct acpi_table_nfit),
|
|
|
|
sz - sizeof(struct acpi_table_nfit));
|
2017-04-19 00:56:31 +08:00
|
|
|
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
|
2015-05-20 10:54:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int acpi_nfit_remove(struct acpi_device *adev)
|
|
|
|
{
|
2017-04-19 00:56:31 +08:00
|
|
|
/* see acpi_nfit_unregister */
|
2015-05-20 10:54:31 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-06-09 02:36:57 +08:00
|
|
|
static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
|
2015-10-28 06:58:27 +08:00
|
|
|
{
|
2016-08-19 13:15:04 +08:00
|
|
|
struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
|
2015-10-28 06:58:27 +08:00
|
|
|
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
|
2016-07-15 07:19:55 +08:00
|
|
|
union acpi_object *obj;
|
2015-10-28 06:58:27 +08:00
|
|
|
acpi_status status;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!dev->driver) {
|
|
|
|
/* dev->driver may be null if we're being removed */
|
2018-03-02 20:20:49 +08:00
|
|
|
dev_dbg(dev, "no driver found for dev\n");
|
2016-08-19 13:15:04 +08:00
|
|
|
return;
|
2015-10-28 06:58:27 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!acpi_desc) {
|
2016-02-20 04:29:32 +08:00
|
|
|
acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
|
|
|
|
if (!acpi_desc)
|
2016-08-19 13:15:04 +08:00
|
|
|
return;
|
|
|
|
acpi_nfit_desc_init(acpi_desc, dev);
|
2016-02-20 04:16:34 +08:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Finish previous registration before considering new
|
|
|
|
* regions.
|
|
|
|
*/
|
|
|
|
flush_workqueue(nfit_wq);
|
2015-10-28 06:58:27 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Evaluate _FIT */
|
2016-08-19 13:15:04 +08:00
|
|
|
status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
|
2015-10-28 06:58:27 +08:00
|
|
|
if (ACPI_FAILURE(status)) {
|
|
|
|
dev_err(dev, "failed to evaluate _FIT\n");
|
2016-08-19 13:15:04 +08:00
|
|
|
return;
|
2015-10-28 06:58:27 +08:00
|
|
|
}
|
|
|
|
|
2015-11-21 08:05:49 +08:00
|
|
|
obj = buf.pointer;
|
|
|
|
if (obj->type == ACPI_TYPE_BUFFER) {
|
2016-07-15 07:19:55 +08:00
|
|
|
ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
|
|
|
|
obj->buffer.length);
|
2016-07-15 08:22:48 +08:00
|
|
|
if (ret)
|
2015-11-21 08:05:49 +08:00
|
|
|
dev_err(dev, "failed to merge updated NFIT\n");
|
2016-07-15 08:22:48 +08:00
|
|
|
} else
|
2015-11-21 08:05:49 +08:00
|
|
|
dev_err(dev, "Invalid _FIT\n");
|
2015-10-28 06:58:27 +08:00
|
|
|
kfree(buf.pointer);
|
2016-08-19 13:15:04 +08:00
|
|
|
}
|
2017-06-09 02:36:57 +08:00
|
|
|
|
|
|
|
static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle)
|
|
|
|
{
|
|
|
|
struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
|
|
|
|
|
2018-10-14 11:32:17 +08:00
	if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON)
		acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
	else
		acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_SHORT);
}
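
/*
 * Common notify handler, shared between the ACPI-core entry point below
 * and callers that already hold the device lock (it is exported for use
 * by test infrastructure such as nfit_test): dispatch NFIT update and
 * uncorrectable-memory-error events to the handlers above and ignore
 * everything else.
 */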
void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
{
	dev_dbg(dev, "event: 0x%x\n", event);

	switch (event) {
	case NFIT_NOTIFY_UPDATE:
		return acpi_nfit_update_notify(dev, handle);
	case NFIT_NOTIFY_UC_MEMORY_ERROR:
		return acpi_nfit_uc_error_notify(dev, handle);
	default:
		return;
	}
}
EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
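
/*
 * A minimal usage sketch (hypothetical caller; 'dev' and 'handle' are
 * assumed to belong to an emulated ACPI0012 device owned by a test
 * harness):
 *
 *	__acpi_nfit_notify(dev, handle, NFIT_NOTIFY_UPDATE);
 *
 * The in-tree entry point below takes the device lock first so that
 * notifications are serialized against probe/remove of the device.
 */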
static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
	nfit_device_lock(&adev->dev);
	__acpi_nfit_notify(&adev->dev, adev->handle, event);
	nfit_device_unlock(&adev->dev);
}
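
/* ACPI0012 is the ACPI-defined _HID for the NVDIMM Root Device. */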
static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
		.notify = acpi_nfit_notify,
	},
};

static __init int nfit_init(void)
{
	int ret;

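	/*
	 * Fail the build if the ACPICA structure layouts drift from the
	 * sizes fixed by the ACPI NFIT definition.
	 */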
	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16);

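	/*
	 * Parse the well-known GUIDs once so table walks can match SPA
	 * range types and NVDIMM DSM families by index.
	 */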
	guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]);
	guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]);
	guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]);
	guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]);
	guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]);
	guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]);
	guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]);
	guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]);
	guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]);
	guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]);
	guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
	guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
	guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
	guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]);
	guid_parse(UUID_INTEL_BUS, &nfit_uuid[NFIT_BUS_INTEL]);

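	/*
	 * The singlethread workqueue serializes region registration and
	 * scrub work; driver registration below can fail, so be ready to
	 * unwind.
	 */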
	nfit_wq = create_singlethread_workqueue("nfit");
	if (!nfit_wq)
		return -ENOMEM;

	nfit_mce_register();
	ret = acpi_bus_register_driver(&acpi_nfit_driver);
	if (ret) {
		nfit_mce_unregister();
		destroy_workqueue(nfit_wq);
	}

	return ret;
}
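
/*
 * The MCE notifier is unregistered first so no new error-driven scrub
 * requests arrive while devices are being removed; any acpi_desc still
 * on the list by the time the driver is gone indicates a leak.
 */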
static __exit void nfit_exit(void)
{
	nfit_mce_unregister();
	acpi_bus_unregister_driver(&acpi_nfit_driver);
	destroy_workqueue(nfit_wq);
	WARN_ON(!list_empty(&acpi_descs));
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");