Merge branch 'for-6.5/cxl-background' into for-6.5/cxl
Pick up the sanitization work and the infrastructure for other background commands for 6.5. Sanitization has a different completion path than typical background commands so it was important to have both thought out and implemented before either went upstream.
commit dcfb70610d
@@ -58,6 +58,43 @@ Description:
 		affinity for this device.
 
+
+What:		/sys/bus/cxl/devices/memX/security/state
+Date:		June, 2023
+KernelVersion:	v6.5
+Contact:	linux-cxl@vger.kernel.org
+Description:
+		(RO) Reading this file will display the CXL security state for
+		that device. Such states can be: 'disabled'; 'sanitize', when
+		a sanitization is currently underway; or those available only
+		for persistent memory: 'locked', 'unlocked' or 'frozen'. This
+		sysfs entry is select/poll capable from userspace to notify
+		upon completion of a sanitize operation.
+
+What:		/sys/bus/cxl/devices/memX/security/sanitize
+Date:		June, 2023
+KernelVersion:	v6.5
+Contact:	linux-cxl@vger.kernel.org
+Description:
+		(WO) Write a boolean 'true' string value to this attribute to
+		sanitize the device to securely re-purpose or decommission it.
+		This is done by ensuring that all user data and meta-data,
+		whether it resides in persistent capacity, volatile capacity,
+		or the LSA, is made permanently unavailable by whatever means
+		is appropriate for the media type. This functionality requires
+		the device to not be actively decoding any HPA ranges.
+
+What:		/sys/bus/cxl/devices/memX/security/erase
+Date:		June, 2023
+KernelVersion:	v6.5
+Contact:	linux-cxl@vger.kernel.org
+Description:
+		(WO) Write a boolean 'true' string value to this attribute to
+		secure erase user data by changing the media encryption keys for
+		all user data areas of the device.
+
+
 What:		/sys/bus/cxl/devices/*/devtype
 Date:		June, 2021
 KernelVersion:	v5.14
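As a usage illustration outside the diff: a minimal userspace sketch against the new ABI, assuming a memdev named mem0 with security disabled and no active decoders. Sysfs poll semantics require an initial read to arm the notification and a re-read after wakeup:

        #include <fcntl.h>
        #include <poll.h>
        #include <stdio.h>
        #include <unistd.h>

        int main(void)
        {
                char state[32];
                struct pollfd pfd;
                ssize_t n;
                int fd, sfd;

                /* kick off the sanitize operation */
                sfd = open("/sys/bus/cxl/devices/mem0/security/sanitize", O_WRONLY);
                if (sfd < 0 || write(sfd, "true", 4) < 0)
                        return 1;
                close(sfd);

                /* wait for the 'state' attribute to be notified */
                fd = open("/sys/bus/cxl/devices/mem0/security/state", O_RDONLY);
                if (fd < 0)
                        return 1;
                pread(fd, state, sizeof(state) - 1, 0);        /* arm */
                pfd.fd = fd;
                pfd.events = POLLPRI | POLLERR;
                poll(&pfd, 1, -1);
                n = pread(fd, state, sizeof(state) - 1, 0);    /* re-read */
                if (n > 0) {
                        state[n] = '\0';
                        printf("security state: %s", state);
                }
                close(fd);
                return 0;
        }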
@@ -220,7 +220,8 @@ int cxl_internal_send_cmd(struct cxl_dev_state *cxlds,
 	if (rc)
 		return rc;
 
-	if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS)
+	if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS &&
+	    mbox_cmd->return_code != CXL_MBOX_CMD_RC_BACKGROUND)
 		return cxl_mbox_cmd_rc2errno(mbox_cmd);
 
 	if (!out_size)
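In effect, a caller of cxl_internal_send_cmd() now sees rc == 0 for a command the hardware accepted but is completing asynchronously; a hedged sketch of the resulting caller-side pattern (illustrative, not from this diff):

        rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
        if (rc)
                return rc;
        if (mbox_cmd.return_code == CXL_MBOX_CMD_RC_BACKGROUND)
                dev_dbg(cxlds->dev, "operation continuing in the background\n");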
@@ -1074,6 +1075,65 @@ int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
 }
 EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
 
+/**
+ * cxl_mem_sanitize() - Send a sanitization command to the device.
+ * @cxlds: The device data for the operation
+ * @cmd: The specific sanitization command opcode
+ *
+ * Return: 0 if the command was executed successfully, regardless of
+ * whether or not the actual security operation is done in the background,
+ * such as for the Sanitize case.
+ * Error return values can come from the mailbox command or be -EINVAL
+ * when security requirements are not met or the context is invalid.
+ *
+ * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
+ */
+int cxl_mem_sanitize(struct cxl_dev_state *cxlds, u16 cmd)
+{
+	int rc;
+	u32 sec_out = 0;
+	struct cxl_get_security_output {
+		__le32 flags;
+	} out;
+	struct cxl_mbox_cmd sec_cmd = {
+		.opcode = CXL_MBOX_OP_GET_SECURITY_STATE,
+		.payload_out = &out,
+		.size_out = sizeof(out),
+	};
+	struct cxl_mbox_cmd mbox_cmd = { .opcode = cmd };
+
+	if (cmd != CXL_MBOX_OP_SANITIZE && cmd != CXL_MBOX_OP_SECURE_ERASE)
+		return -EINVAL;
+
+	rc = cxl_internal_send_cmd(cxlds, &sec_cmd);
+	if (rc < 0) {
+		dev_err(cxlds->dev, "Failed to get security state : %d", rc);
+		return rc;
+	}
+
+	/*
+	 * Prior to using these commands, any security applied to
+	 * the user data areas of the device shall be DISABLED (or
+	 * UNLOCKED for secure erase case).
+	 */
+	sec_out = le32_to_cpu(out.flags);
+	if (sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET)
+		return -EINVAL;
+
+	if (cmd == CXL_MBOX_OP_SECURE_ERASE &&
+	    sec_out & CXL_PMEM_SEC_STATE_LOCKED)
+		return -EINVAL;
+
+	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+	if (rc < 0) {
+		dev_err(cxlds->dev, "Failed to sanitize device : %d", rc);
+		return rc;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_mem_sanitize, CXL);
+
 static int add_dpa_res(struct device *dev, struct resource *parent,
 		       struct resource *res, resource_size_t start,
 		       resource_size_t size, const char *type)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright(c) 2020 Intel Corporation. */
 
+#include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/device.h>
 #include <linux/slab.h>
 #include <linux/idr.h>
@@ -107,6 +108,88 @@ static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RO(numa_node);
 
+static ssize_t security_state_show(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	unsigned long state = cxlds->security.state;
+	u64 reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
+	u32 pct = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg);
+	u16 cmd = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
+
+	if (cmd == CXL_MBOX_OP_SANITIZE && pct != 100)
+		return sysfs_emit(buf, "sanitize\n");
+
+	if (!(state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
+		return sysfs_emit(buf, "disabled\n");
+	if (state & CXL_PMEM_SEC_STATE_FROZEN ||
+	    state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT ||
+	    state & CXL_PMEM_SEC_STATE_USER_PLIMIT)
+		return sysfs_emit(buf, "frozen\n");
+	if (state & CXL_PMEM_SEC_STATE_LOCKED)
+		return sysfs_emit(buf, "locked\n");
+	else
+		return sysfs_emit(buf, "unlocked\n");
+}
+static struct device_attribute dev_attr_security_state =
+	__ATTR(state, 0444, security_state_show, NULL);
+
+static ssize_t security_sanitize_store(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t len)
+{
+	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_port *port = dev_get_drvdata(&cxlmd->dev);
+	ssize_t rc;
+	bool sanitize;
+
+	if (kstrtobool(buf, &sanitize) || !sanitize)
+		return -EINVAL;
+
+	if (!port || !is_cxl_endpoint(port))
+		return -EINVAL;
+
+	/* ensure no regions are mapped to this memdev */
+	if (port->commit_end != -1)
+		return -EBUSY;
+
+	rc = cxl_mem_sanitize(cxlds, CXL_MBOX_OP_SANITIZE);
+
+	return rc ? rc : len;
+}
+static struct device_attribute dev_attr_security_sanitize =
+	__ATTR(sanitize, 0200, NULL, security_sanitize_store);
+
+static ssize_t security_erase_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t len)
+{
+	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_port *port = dev_get_drvdata(&cxlmd->dev);
+	ssize_t rc;
+	bool erase;
+
+	if (kstrtobool(buf, &erase) || !erase)
+		return -EINVAL;
+
+	if (!port || !is_cxl_endpoint(port))
+		return -EINVAL;
+
+	/* ensure no regions are mapped to this memdev */
+	if (port->commit_end != -1)
+		return -EBUSY;
+
+	rc = cxl_mem_sanitize(cxlds, CXL_MBOX_OP_SECURE_ERASE);
+
+	return rc ? rc : len;
+}
+static struct device_attribute dev_attr_security_erase =
+	__ATTR(erase, 0200, NULL, security_erase_store);
+
 static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd)
 {
 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
@@ -352,6 +435,13 @@ static struct attribute *cxl_memdev_ram_attributes[] = {
 	NULL,
 };
 
+static struct attribute *cxl_memdev_security_attributes[] = {
+	&dev_attr_security_state.attr,
+	&dev_attr_security_sanitize.attr,
+	&dev_attr_security_erase.attr,
+	NULL,
+};
+
 static umode_t cxl_memdev_visible(struct kobject *kobj, struct attribute *a,
 				  int n)
 {
@@ -375,10 +465,16 @@ static struct attribute_group cxl_memdev_pmem_attribute_group = {
 	.attrs = cxl_memdev_pmem_attributes,
 };
 
+static struct attribute_group cxl_memdev_security_attribute_group = {
+	.name = "security",
+	.attrs = cxl_memdev_security_attributes,
+};
+
 static const struct attribute_group *cxl_memdev_attribute_groups[] = {
 	&cxl_memdev_attribute_group,
 	&cxl_memdev_ram_attribute_group,
 	&cxl_memdev_pmem_attribute_group,
+	&cxl_memdev_security_attribute_group,
 	NULL,
 };
@@ -427,11 +523,21 @@ void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cm
 }
 EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);
 
+static void cxl_memdev_security_shutdown(struct device *dev)
+{
+	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+	if (cxlds->security.poll)
+		cancel_delayed_work_sync(&cxlds->security.poll_dwork);
+}
+
 static void cxl_memdev_shutdown(struct device *dev)
 {
 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 
 	down_write(&cxl_memdev_rwsem);
+	cxl_memdev_security_shutdown(dev);
 	cxlmd->cxlds = NULL;
 	up_write(&cxl_memdev_rwsem);
 }
@@ -551,6 +657,34 @@ static const struct file_operations cxl_memdev_fops = {
 	.llseek = noop_llseek,
 };
 
+static void put_sanitize(void *data)
+{
+	struct cxl_dev_state *cxlds = data;
+
+	sysfs_put(cxlds->security.sanitize_node);
+}
+
+static int cxl_memdev_security_init(struct cxl_memdev *cxlmd)
+{
+	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct device *dev = &cxlmd->dev;
+	struct kernfs_node *sec;
+
+	sec = sysfs_get_dirent(dev->kobj.sd, "security");
+	if (!sec) {
+		dev_err(dev, "sysfs_get_dirent 'security' failed\n");
+		return -ENODEV;
+	}
+	cxlds->security.sanitize_node = sysfs_get_dirent(sec, "state");
+	sysfs_put(sec);
+	if (!cxlds->security.sanitize_node) {
+		dev_err(dev, "sysfs_get_dirent 'state' failed\n");
+		return -ENODEV;
+	}
+
+	return devm_add_action_or_reset(cxlds->dev, put_sanitize, cxlds);
+}
+
 struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
 {
 	struct cxl_memdev *cxlmd;
@@ -579,6 +713,10 @@ struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
 	if (rc)
 		goto err;
 
+	rc = cxl_memdev_security_init(cxlmd);
+	if (rc)
+		goto err;
+
 	rc = devm_add_action_or_reset(cxlds->dev, cxl_memdev_unregister, cxlmd);
 	if (rc)
 		return ERR_PTR(rc);
@@ -176,14 +176,22 @@ static inline int ways_to_eiw(unsigned int ways, u8 *eiw)
 /* CXL 2.0 8.2.8.4 Mailbox Registers */
 #define CXLDEV_MBOX_CAPS_OFFSET 0x00
 #define   CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK GENMASK(4, 0)
+#define   CXLDEV_MBOX_CAP_BG_CMD_IRQ BIT(6)
+#define   CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK GENMASK(10, 7)
 #define CXLDEV_MBOX_CTRL_OFFSET 0x04
 #define   CXLDEV_MBOX_CTRL_DOORBELL BIT(0)
+#define   CXLDEV_MBOX_CTRL_BG_CMD_IRQ BIT(2)
 #define CXLDEV_MBOX_CMD_OFFSET 0x08
 #define   CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK GENMASK_ULL(15, 0)
 #define   CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK GENMASK_ULL(36, 16)
 #define CXLDEV_MBOX_STATUS_OFFSET 0x10
+#define   CXLDEV_MBOX_STATUS_BG_CMD BIT(0)
 #define   CXLDEV_MBOX_STATUS_RET_CODE_MASK GENMASK_ULL(47, 32)
 #define CXLDEV_MBOX_BG_CMD_STATUS_OFFSET 0x18
+#define   CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK GENMASK_ULL(15, 0)
+#define   CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK GENMASK_ULL(22, 16)
+#define   CXLDEV_MBOX_BG_CMD_COMMAND_RC_MASK GENMASK_ULL(47, 32)
+#define   CXLDEV_MBOX_BG_CMD_COMMAND_VENDOR_MASK GENMASK_ULL(63, 48)
 #define CXLDEV_MBOX_PAYLOAD_OFFSET 0x20
 
 /*
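For reference, a sketch of how the new background-status fields decode (mirroring the helpers added elsewhere in this merge; 'mbox' stands in for cxlds->regs.mbox):

        u64 reg = readq(mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
        u16 opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg); /* bits 15:0 */
        u32 pct = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg);       /* bits 22:16, 0-100 */
        u16 bg_rc = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_RC_MASK, reg);      /* bits 47:32 */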
@@ -5,6 +5,7 @@
 #include <uapi/linux/cxl_mem.h>
 #include <linux/cdev.h>
 #include <linux/uuid.h>
+#include <linux/rcuwait.h>
 #include "cxl.h"
 
 /* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
@@ -108,6 +109,9 @@ static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port,
  *            variable sized output commands, it tells the exact number of bytes
  *            written.
  * @min_out: (input) internal command output payload size validation
+ * @poll_count: (input) Number of timeouts to attempt.
+ * @poll_interval_ms: (input) Time between mailbox background command polling
+ *                    interval timeouts.
  * @return_code: (output) Error code returned from hardware.
  *
  * This is the primary mechanism used to send commands to the hardware.
@@ -123,6 +127,8 @@ struct cxl_mbox_cmd {
 	size_t size_in;
 	size_t size_out;
 	size_t min_out;
+	int poll_count;
+	int poll_interval_ms;
 	u16 return_code;
 };
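A hedged sketch of how a background-capable command would opt into the timesliced wait via these new fields (opcode and values purely illustrative):

        struct cxl_mbox_cmd mbox_cmd = {
                .opcode = CXL_MBOX_OP_SCAN_MEDIA,       /* illustrative opcode */
                .poll_count = 30,                       /* up to 30 wait slices */
                .poll_interval_ms = 1000,               /* 1 second per slice */
        };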
@@ -254,6 +260,23 @@ struct cxl_poison_state {
 	struct mutex lock;  /* Protect reads of poison list */
 };
 
+/**
+ * struct cxl_security_state - Device security state
+ *
+ * @state: state of last security operation
+ * @poll: polling for sanitization is enabled, device has no mbox irq support
+ * @poll_tmo_secs: polling timeout
+ * @poll_dwork: polling work item
+ * @sanitize_node: sanitization sysfs file to notify
+ */
+struct cxl_security_state {
+	unsigned long state;
+	bool poll;
+	int poll_tmo_secs;
+	struct delayed_work poll_dwork;
+	struct kernfs_node *sanitize_node;
+};
+
 /**
  * struct cxl_dev_state - The driver device state
  *
@@ -330,7 +353,9 @@ struct cxl_dev_state {
 
 	struct cxl_event_state event;
 	struct cxl_poison_state poison;
+	struct cxl_security_state security;
 
+	struct rcuwait mbox_wait;
 	int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd);
 };
 
@@ -362,6 +387,8 @@ enum cxl_opcode {
 	CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS	= 0x4303,
 	CXL_MBOX_OP_SCAN_MEDIA		= 0x4304,
 	CXL_MBOX_OP_GET_SCAN_MEDIA	= 0x4305,
+	CXL_MBOX_OP_SANITIZE		= 0x4400,
+	CXL_MBOX_OP_SECURE_ERASE	= 0x4401,
 	CXL_MBOX_OP_GET_SECURITY_STATE	= 0x4500,
 	CXL_MBOX_OP_SET_PASSPHRASE	= 0x4501,
 	CXL_MBOX_OP_DISABLE_PASSPHRASE	= 0x4502,
@@ -722,6 +749,8 @@ static inline void cxl_mem_active_dec(void)
 }
 #endif
 
+int cxl_mem_sanitize(struct cxl_dev_state *cxlds, u16 cmd);
+
 struct cxl_hdm {
 	struct cxl_component_regs regs;
 	unsigned int decoder_count;
@@ -84,6 +84,89 @@ static int cxl_pci_mbox_wait_for_doorbell(struct cxl_dev_state *cxlds)
 			    status & CXLMDEV_DEV_FATAL ? " fatal" : "",
 			    status & CXLMDEV_FW_HALT ? " firmware-halt" : "")
 
+struct cxl_dev_id {
+	struct cxl_dev_state *cxlds;
+};
+
+static int cxl_request_irq(struct cxl_dev_state *cxlds, int irq,
+			   irq_handler_t handler, irq_handler_t thread_fn)
+{
+	struct device *dev = cxlds->dev;
+	struct cxl_dev_id *dev_id;
+
+	/* dev_id must be globally unique and must contain the cxlds */
+	dev_id = devm_kzalloc(dev, sizeof(*dev_id), GFP_KERNEL);
+	if (!dev_id)
+		return -ENOMEM;
+	dev_id->cxlds = cxlds;
+
+	return devm_request_threaded_irq(dev, irq, handler, thread_fn,
+					 IRQF_SHARED | IRQF_ONESHOT,
+					 NULL, dev_id);
+}
+
+static bool cxl_mbox_background_complete(struct cxl_dev_state *cxlds)
+{
+	u64 reg;
+
+	reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
+	return FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg) == 100;
+}
+
+static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
+{
+	u64 reg;
+	u16 opcode;
+	struct cxl_dev_id *dev_id = id;
+	struct cxl_dev_state *cxlds = dev_id->cxlds;
+
+	if (!cxl_mbox_background_complete(cxlds))
+		return IRQ_NONE;
+
+	reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
+	opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
+	if (opcode == CXL_MBOX_OP_SANITIZE) {
+		if (cxlds->security.sanitize_node)
+			sysfs_notify_dirent(cxlds->security.sanitize_node);
+
+		dev_dbg(cxlds->dev, "Sanitization operation ended\n");
+	} else {
+		/* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
+		rcuwait_wake_up(&cxlds->mbox_wait);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Sanitization operation polling mode.
+ */
+static void cxl_mbox_sanitize_work(struct work_struct *work)
+{
+	struct cxl_dev_state *cxlds;
+
+	cxlds = container_of(work,
+			     struct cxl_dev_state, security.poll_dwork.work);
+
+	mutex_lock(&cxlds->mbox_mutex);
+	if (cxl_mbox_background_complete(cxlds)) {
+		cxlds->security.poll_tmo_secs = 0;
+		put_device(cxlds->dev);
+
+		if (cxlds->security.sanitize_node)
+			sysfs_notify_dirent(cxlds->security.sanitize_node);
+
+		dev_dbg(cxlds->dev, "Sanitization operation ended\n");
+	} else {
+		int timeout = cxlds->security.poll_tmo_secs + 10;
+
+		cxlds->security.poll_tmo_secs = min(15 * 60, timeout);
+		queue_delayed_work(system_wq, &cxlds->security.poll_dwork,
+				   timeout * HZ);
+	}
+	mutex_unlock(&cxlds->mbox_mutex);
+}
+
 /**
  * __cxl_pci_mbox_send_cmd() - Execute a mailbox command
  * @cxlds: The device state to communicate with.
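Worked example of the backoff in cxl_mbox_sanitize_work() above: the first poll fires one second after submission; each miss re-queues at the previous interval plus 10 seconds (1s, 11s, 21s, ...), and min(15 * 60, timeout) caps the stored interval, so a long-running sanitize settles into a check roughly every 15 minutes.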
@@ -144,6 +227,16 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds,
 		return -EBUSY;
 	}
 
+	/*
+	 * With sanitize polling, hardware might be done and the poller still
+	 * not be in sync. Ensure no new command comes in until so. Keep the
+	 * hardware semantics and only allow device health status.
+	 */
+	if (cxlds->security.poll_tmo_secs > 0) {
+		if (mbox_cmd->opcode != CXL_MBOX_OP_GET_HEALTH_INFO)
+			return -EBUSY;
+	}
+
 	cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
 			     mbox_cmd->opcode);
 	if (mbox_cmd->size_in) {
@@ -177,12 +270,80 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds,
 	mbox_cmd->return_code =
 		FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);
 
+	/*
+	 * Handle the background command in a synchronous manner.
+	 *
+	 * All other mailbox commands will serialize/queue on the mbox_mutex,
+	 * which we currently hold. Furthermore this also guarantees that
+	 * cxl_mbox_background_complete() checks are safe amongst each other,
+	 * in that no new bg operation can occur in between.
+	 *
+	 * Background operations are timesliced in accordance with the nature
+	 * of the command. In the event of timeout, the mailbox state is
+	 * indeterminate until the next successful command submission and the
+	 * driver can get back in sync with the hardware state.
+	 */
+	if (mbox_cmd->return_code == CXL_MBOX_CMD_RC_BACKGROUND) {
+		u64 bg_status_reg;
+		int i, timeout;
+
+		/*
+		 * Sanitization is a special case which monopolizes the device
+		 * and cannot be timesliced. Handle asynchronously instead,
+		 * and allow userspace to poll(2) for completion.
+		 */
+		if (mbox_cmd->opcode == CXL_MBOX_OP_SANITIZE) {
+			if (cxlds->security.poll_tmo_secs != -1) {
+				/* hold the device throughout */
+				get_device(cxlds->dev);
+
+				/* give first timeout a second */
+				timeout = 1;
+				cxlds->security.poll_tmo_secs = timeout;
+				queue_delayed_work(system_wq,
+						   &cxlds->security.poll_dwork,
+						   timeout * HZ);
+			}
+
+			dev_dbg(dev, "Sanitization operation started\n");
+			goto success;
+		}
+
+		dev_dbg(dev, "Mailbox background operation (0x%04x) started\n",
+			mbox_cmd->opcode);
+
+		timeout = mbox_cmd->poll_interval_ms;
+		for (i = 0; i < mbox_cmd->poll_count; i++) {
+			if (rcuwait_wait_event_timeout(&cxlds->mbox_wait,
+				       cxl_mbox_background_complete(cxlds),
+				       TASK_UNINTERRUPTIBLE,
+				       msecs_to_jiffies(timeout)) > 0)
+				break;
+		}
+
+		if (!cxl_mbox_background_complete(cxlds)) {
+			dev_err(dev, "timeout waiting for background (%d ms)\n",
+				timeout * mbox_cmd->poll_count);
+			return -ETIMEDOUT;
+		}
+
+		bg_status_reg = readq(cxlds->regs.mbox +
+				      CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
+		mbox_cmd->return_code =
+			FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_RC_MASK,
+				  bg_status_reg);
+		dev_dbg(dev,
+			"Mailbox background operation (0x%04x) completed\n",
+			mbox_cmd->opcode);
+	}
+
 	if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS) {
 		dev_dbg(dev, "Mailbox operation had an error: %s\n",
 			cxl_mbox_cmd_rc2str(mbox_cmd));
 		return 0; /* completed but caller must check return_code */
 	}
 
+success:
 	/* #7 */
 	cmd_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
 	out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);
@@ -271,6 +432,34 @@ static int cxl_pci_setup_mailbox(struct cxl_dev_state *cxlds)
 	dev_dbg(cxlds->dev, "Mailbox payload sized %zu",
 		cxlds->payload_size);
 
+	rcuwait_init(&cxlds->mbox_wait);
+
+	if (cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) {
+		u32 ctrl;
+		int irq, msgnum;
+		struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+
+		msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
+		irq = pci_irq_vector(pdev, msgnum);
+		if (irq < 0)
+			goto mbox_poll;
+
+		if (cxl_request_irq(cxlds, irq, cxl_pci_mbox_irq, NULL))
+			goto mbox_poll;
+
+		/* enable background command mbox irq support */
+		ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
+		ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
+		writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
+
+		return 0;
+	}
+
+mbox_poll:
+	cxlds->security.poll = true;
+	INIT_DELAYED_WORK(&cxlds->security.poll_dwork, cxl_mbox_sanitize_work);
+
+	dev_dbg(cxlds->dev, "Mailbox interrupts are unsupported");
 	return 0;
 }
@@ -469,10 +658,6 @@ static int cxl_alloc_irq_vectors(struct pci_dev *pdev)
 	return 0;
 }
 
-struct cxl_dev_id {
-	struct cxl_dev_state *cxlds;
-};
-
 static irqreturn_t cxl_event_thread(int irq, void *id)
 {
 	struct cxl_dev_id *dev_id = id;
@@ -498,28 +683,18 @@ static irqreturn_t cxl_event_thread(int irq, void *id)
 
 static int cxl_event_req_irq(struct cxl_dev_state *cxlds, u8 setting)
 {
-	struct device *dev = cxlds->dev;
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct cxl_dev_id *dev_id;
+	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
 	int irq;
 
 	if (FIELD_GET(CXLDEV_EVENT_INT_MODE_MASK, setting) != CXL_INT_MSI_MSIX)
 		return -ENXIO;
 
-	/* dev_id must be globally unique and must contain the cxlds */
-	dev_id = devm_kzalloc(dev, sizeof(*dev_id), GFP_KERNEL);
-	if (!dev_id)
-		return -ENOMEM;
-	dev_id->cxlds = cxlds;
-
 	irq = pci_irq_vector(pdev,
 			     FIELD_GET(CXLDEV_EVENT_INT_MSGNUM_MASK, setting));
 	if (irq < 0)
 		return irq;
 
-	return devm_request_threaded_irq(dev, irq, NULL, cxl_event_thread,
-					 IRQF_SHARED | IRQF_ONESHOT, NULL,
-					 dev_id);
+	return cxl_request_irq(cxlds, irq, NULL, cxl_event_thread);
 }
 
 static int cxl_event_get_int_policy(struct cxl_dev_state *cxlds,
@@ -714,6 +889,10 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	else
 		dev_warn(&pdev->dev, "Media not active (%d)\n", rc);
 
+	rc = cxl_alloc_irq_vectors(pdev);
+	if (rc)
+		return rc;
+
 	rc = cxl_pci_setup_mailbox(cxlds);
 	if (rc)
 		return rc;
@@ -738,10 +917,6 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (rc)
 		return rc;
 
-	rc = cxl_alloc_irq_vectors(pdev);
-	if (rc)
-		return rc;
-
 	cxlmd = devm_cxl_add_memdev(cxlds);
 	if (IS_ERR(cxlmd))
 		return PTR_ERR(cxlmd);
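Taken together, the two probe hunks move cxl_alloc_irq_vectors() ahead of cxl_pci_setup_mailbox(), presumably so mailbox setup can request the background-command interrupt vector it now depends on.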
@@ -34,6 +34,9 @@ static unsigned long cxl_pmem_get_security_flags(struct nvdimm *nvdimm,
 		return 0;
 
 	sec_out = le32_to_cpu(out.flags);
+	/* cache security state */
+	cxlds->security.state = sec_out;
+
 	if (ptype == NVDIMM_MASTER) {
 		if (sec_out & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)
 			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
@@ -49,9 +49,9 @@ static inline void prepare_to_rcuwait(struct rcuwait *w)
 
 extern void finish_rcuwait(struct rcuwait *w);
 
-#define rcuwait_wait_event(w, condition, state)				\
+#define ___rcuwait_wait_event(w, condition, state, ret, cmd)		\
 ({									\
-	int __ret = 0;							\
+	long __ret = ret;						\
 	prepare_to_rcuwait(w);						\
 	for (;;) {							\
 		/*							\
@@ -67,10 +67,27 @@ extern void finish_rcuwait(struct rcuwait *w);
 			break;						\
 		}							\
 									\
-		schedule();						\
+		cmd;							\
 	}								\
 	finish_rcuwait(w);						\
 	__ret;								\
 })
 
+#define rcuwait_wait_event(w, condition, state)				\
+	___rcuwait_wait_event(w, condition, state, 0, schedule())
+
+#define __rcuwait_wait_event_timeout(w, condition, state, timeout)	\
+	___rcuwait_wait_event(w, ___wait_cond_timeout(condition),	\
+			      state, timeout,				\
+			      __ret = schedule_timeout(__ret))
+
+#define rcuwait_wait_event_timeout(w, condition, state, timeout)	\
+({									\
+	long __ret = timeout;						\
+	if (!___wait_cond_timeout(condition))				\
+		__ret = __rcuwait_wait_event_timeout(w, condition,	\
+						     state, timeout);	\
+	__ret;								\
+})
+
 #endif /* _LINUX_RCUWAIT_H_ */
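A usage sketch of the new timeout variant (the mailbox wait earlier in this merge is the in-tree user); 'w' and 'done' are assumed to exist in the caller:

        /*
         * Wait up to 100ms for 'done'; as with schedule_timeout(), the
         * result is 0 on timeout and the remaining jiffies (>= 1) on success.
         */
        long left = rcuwait_wait_event_timeout(&w, READ_ONCE(done),
                                               TASK_UNINTERRUPTIBLE,
                                               msecs_to_jiffies(100));
        if (!left)
                pr_warn("timed out waiting for done\n");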
@@ -535,6 +535,52 @@ static int mock_partition_info(struct cxl_dev_state *cxlds,
 	return 0;
 }
 
+static int mock_sanitize(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+{
+	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
+
+	if (cmd->size_in != 0)
+		return -EINVAL;
+
+	if (cmd->size_out != 0)
+		return -EINVAL;
+
+	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
+		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+		return -ENXIO;
+	}
+	if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
+		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+		return -ENXIO;
+	}
+
+	return 0; /* assume less than 2 secs, no bg */
+}
+
+static int mock_secure_erase(struct cxl_dev_state *cxlds,
+			     struct cxl_mbox_cmd *cmd)
+{
+	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
+
+	if (cmd->size_in != 0)
+		return -EINVAL;
+
+	if (cmd->size_out != 0)
+		return -EINVAL;
+
+	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
+		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+		return -ENXIO;
+	}
+
+	if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
+		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
 static int mock_get_security_state(struct cxl_dev_state *cxlds,
 				   struct cxl_mbox_cmd *cmd)
 {
@@ -1153,6 +1199,12 @@ static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *
 	case CXL_MBOX_OP_GET_HEALTH_INFO:
 		rc = mock_health_info(cxlds, cmd);
 		break;
+	case CXL_MBOX_OP_SANITIZE:
+		rc = mock_sanitize(cxlds, cmd);
+		break;
+	case CXL_MBOX_OP_SECURE_ERASE:
+		rc = mock_secure_erase(cxlds, cmd);
+		break;
 	case CXL_MBOX_OP_GET_SECURITY_STATE:
 		rc = mock_get_security_state(cxlds, cmd);
 		break;