// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <uapi/linux/cxl_mem.h>
#include <linux/security.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "cxlmem.h"
#include "pci.h"
#include "cxl.h"

/**
 * DOC: cxl pci
 *
 * This implements the PCI exclusive functionality for a CXL device as it is
 * defined by the Compute Express Link specification. CXL devices may surface
 * certain functionality even if it isn't CXL enabled.
 *
 * The driver has several responsibilities, mainly:
 *  - Create the memX device and register on the CXL bus.
 *  - Enumerate the device's register interfaces and map them.
 *  - Probe the device attributes to establish the sysfs interface.
 *  - Provide an IOCTL interface to userspace to communicate with the device
 *    for things like firmware update.
 */

#define cxl_doorbell_busy(cxlm)                                                \
	(readl((cxlm)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) &                  \
	 CXLDEV_MBOX_CTRL_DOORBELL)

/* CXL 2.0 - 8.2.8.4 */
#define CXL_MAILBOX_TIMEOUT_MS (2 * HZ)
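
/*
 * Note: despite the _MS suffix, this value is a jiffies interval (2 * HZ,
 * i.e. two seconds); cxl_mem_wait_for_doorbell() below compares it against
 * jiffies via time_after().
 */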

enum opcode {
	CXL_MBOX_OP_INVALID		= 0x0000,
	CXL_MBOX_OP_RAW			= CXL_MBOX_OP_INVALID,
	CXL_MBOX_OP_GET_FW_INFO		= 0x0200,
	CXL_MBOX_OP_ACTIVATE_FW		= 0x0202,
	CXL_MBOX_OP_GET_SUPPORTED_LOGS	= 0x0400,
	CXL_MBOX_OP_GET_LOG		= 0x0401,
	CXL_MBOX_OP_IDENTIFY		= 0x4000,
	CXL_MBOX_OP_GET_PARTITION_INFO	= 0x4100,
	CXL_MBOX_OP_SET_PARTITION_INFO	= 0x4101,
	CXL_MBOX_OP_GET_LSA		= 0x4102,
	CXL_MBOX_OP_SET_LSA		= 0x4103,
	CXL_MBOX_OP_GET_HEALTH_INFO	= 0x4200,
	CXL_MBOX_OP_GET_ALERT_CONFIG	= 0x4201,
	CXL_MBOX_OP_SET_ALERT_CONFIG	= 0x4202,
	CXL_MBOX_OP_GET_SHUTDOWN_STATE	= 0x4203,
	CXL_MBOX_OP_SET_SHUTDOWN_STATE	= 0x4204,
	CXL_MBOX_OP_GET_POISON		= 0x4300,
	CXL_MBOX_OP_INJECT_POISON	= 0x4301,
	CXL_MBOX_OP_CLEAR_POISON	= 0x4302,
	CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS	= 0x4303,
	CXL_MBOX_OP_SCAN_MEDIA		= 0x4304,
	CXL_MBOX_OP_GET_SCAN_MEDIA	= 0x4305,
	CXL_MBOX_OP_MAX			= 0x10000
};

/*
 * CXL 2.0 - Memory capacity multiplier
 * See Section 8.2.9.5
 *
 * Volatile, Persistent, and Partition capacities are specified to be in
 * multiples of 256MB - define a multiplier to convert to/from bytes.
 */
#define CXL_CAPACITY_MULTIPLIER SZ_256M
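
/*
 * Illustrative only: a device whose IDENTIFY output reports a total capacity
 * field of 0x10 is advertising 0x10 * CXL_CAPACITY_MULTIPLIER = 4GB.
 */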

/**
 * struct mbox_cmd - A command to be submitted to hardware.
 * @opcode: (input) The command set and command submitted to hardware.
 * @payload_in: (input) Pointer to the input payload.
 * @payload_out: (output) Pointer to the output payload. Must be allocated by
 *		 the caller.
 * @size_in: (input) Number of bytes to load from @payload_in.
 * @size_out: (input) Max number of bytes loaded into @payload_out.
 *	      (output) Number of bytes generated by the device. For fixed size
 *	      output commands this is always expected to be deterministic. For
 *	      variable sized output commands, it tells the exact number of bytes
 *	      written.
 * @return_code: (output) Error code returned from hardware.
 *
 * This is the primary mechanism used to send commands to the hardware.
 * All the fields except @payload_* correspond exactly to the fields described
 * in the Command Register section of CXL 2.0 8.2.8.4.5. @payload_in and
 * @payload_out are written to, and read from the Command Payload Registers
 * defined in CXL 2.0 8.2.8.4.8.
 */
struct mbox_cmd {
	u16 opcode;
	void *payload_in;
	void *payload_out;
	size_t size_in;
	size_t size_out;
	u16 return_code;
#define CXL_MBOX_SUCCESS 0
};
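
/*
 * A minimal sketch (illustrative only) of a fixed-size command issued
 * through the cxl_mem_mbox_send_cmd() helper defined later in this file;
 * 0x12 is the Get Health Info output size from the command table below:
 *
 *	u8 health[0x12];
 *	int rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_HEALTH_INFO,
 *				       NULL, 0, health, sizeof(health));
 */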

static DECLARE_RWSEM(cxl_memdev_rwsem);
static struct dentry *cxl_debugfs;
static bool cxl_raw_allow_all;

enum {
	CEL_UUID,
	VENDOR_DEBUG_UUID,
};

/* See CXL 2.0 Table 170. Get Log Input Payload */
static const uuid_t log_uuid[] = {
	[CEL_UUID] = UUID_INIT(0xda9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79, 0x96,
			       0xb1, 0x62, 0x3b, 0x3f, 0x17),
	[VENDOR_DEBUG_UUID] = UUID_INIT(0xe1819d9, 0x11a9, 0x400c, 0x81, 0x1f,
					0xd6, 0x07, 0x19, 0x40, 0x3d, 0x86),
};
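
/*
 * Illustrative only: a Get Log mailbox command selects its target log by
 * UUID, so placing log_uuid[CEL_UUID] in the input payload requests the
 * Command Effects Log (CEL).
 */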

/**
 * struct cxl_mem_command - Driver representation of a memory device command
 * @info: Command information as it exists for the UAPI
 * @opcode: The actual bits used for the mailbox protocol
 * @flags: Set of flags affecting driver behavior.
 *
 *  * %CXL_CMD_FLAG_FORCE_ENABLE: In cases of error, commands with this flag
 *    will be enabled by the driver regardless of what hardware may have
 *    advertised.
 *
 * The cxl_mem_command is the driver's internal representation of commands that
 * are supported by the driver. Some of these commands may not be supported by
 * the hardware. The driver will use @info to validate the fields passed in by
 * the user then submit the @opcode to the hardware.
 *
 * See struct cxl_command_info.
 */
struct cxl_mem_command {
	struct cxl_command_info info;
	enum opcode opcode;
	u32 flags;
#define CXL_CMD_FLAG_NONE 0
#define CXL_CMD_FLAG_FORCE_ENABLE BIT(0)
};

#define CXL_CMD(_id, sin, sout, _flags)                                        \
	[CXL_MEM_COMMAND_ID_##_id] = {                                         \
	.info =	{                                                              \
			.id = CXL_MEM_COMMAND_ID_##_id,                        \
			.size_in = sin,                                        \
			.size_out = sout,                                      \
		},                                                             \
	.opcode = CXL_MBOX_OP_##_id,                                           \
	.flags = _flags,                                                       \
	}
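
/*
 * For illustration, CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE)
 * expands (modulo whitespace) to:
 *
 *	[CXL_MEM_COMMAND_ID_IDENTIFY] = {
 *		.info = {
 *			.id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *			.size_in = 0,
 *			.size_out = 0x43,
 *		},
 *		.opcode = CXL_MBOX_OP_IDENTIFY,
 *		.flags = CXL_CMD_FLAG_FORCE_ENABLE,
 *	}
 */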

/*
 * This table defines the supported mailbox commands for the driver. This table
 * is made up of a UAPI structure. Non-negative values as parameters in the
 * table will be validated against the user's input. For example, if size_in is
 * 0, and the user passed in 1, it is an error.
 */
static struct cxl_mem_command mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
	CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
	CXL_CMD(RAW, ~0, ~0, 0),
#endif
	CXL_CMD(GET_SUPPORTED_LOGS, 0, ~0, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
	CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
	CXL_CMD(GET_LSA, 0x8, ~0, 0),
	CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
	CXL_CMD(GET_LOG, 0x18, ~0, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
	CXL_CMD(SET_LSA, ~0, 0, 0),
	CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
	CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
	CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
	CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
	CXL_CMD(GET_POISON, 0x10, ~0, 0),
	CXL_CMD(INJECT_POISON, 0x8, 0, 0),
	CXL_CMD(CLEAR_POISON, 0x48, 0, 0),
	CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
	CXL_CMD(SCAN_MEDIA, 0x11, 0, 0),
	CXL_CMD(GET_SCAN_MEDIA, 0, ~0, 0),
};

/*
 * Commands that RAW doesn't permit. The rationale for each:
 *
 * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
 * coordination of transaction timeout values at the root bridge level.
 *
 * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
 * and needs to be coordinated with HDM updates.
 *
 * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
 * driver and any writes from userspace invalidate those contents.
 *
 * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
 * to the device after it is marked clean; userspace cannot make that
 * assertion.
 *
 * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
 * is kept up to date with patrol notifications and error management.
 */
static u16 cxl_disabled_raw_commands[] = {
	CXL_MBOX_OP_ACTIVATE_FW,
	CXL_MBOX_OP_SET_PARTITION_INFO,
	CXL_MBOX_OP_SET_LSA,
	CXL_MBOX_OP_SET_SHUTDOWN_STATE,
	CXL_MBOX_OP_SCAN_MEDIA,
	CXL_MBOX_OP_GET_SCAN_MEDIA,
};

/*
 * Command sets that RAW doesn't permit. All opcodes in this set are
 * disabled because they pass plain text security payloads over the
 * user/kernel boundary. This functionality is intended to be wrapped
 * behind the keys ABI which allows for encrypted payloads in the UAPI.
 */
static u8 security_command_sets[] = {
	0x44, /* Sanitize */
	0x45, /* Persistent Memory Data-at-rest Security */
	0x46, /* Security Passthrough */
};
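
/*
 * Illustrative only: the security set of an opcode is its upper byte, so a
 * hypothetical opcode 0x4500 (set 0x45, command 0x00) matches the
 * "Persistent Memory Data-at-rest Security" entry above and is rejected by
 * cxl_is_security_command() below.
 */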

#define cxl_for_each_cmd(cmd)                                                  \
	for ((cmd) = &mem_commands[0];                                         \
	     ((cmd) - mem_commands) < ARRAY_SIZE(mem_commands); (cmd)++)

#define cxl_cmd_count ARRAY_SIZE(mem_commands)

static int cxl_mem_wait_for_doorbell(struct cxl_mem *cxlm)
{
	const unsigned long start = jiffies;
	unsigned long end = start;

	while (cxl_doorbell_busy(cxlm)) {
		end = jiffies;

		if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) {
			/* Check again in case preempted before timeout test */
			if (!cxl_doorbell_busy(cxlm))
				break;

			return -ETIMEDOUT;
		}
		cpu_relax();
	}

	dev_dbg(&cxlm->pdev->dev, "Doorbell wait took %dms",
		jiffies_to_msecs(end) - jiffies_to_msecs(start));
	return 0;
}

static bool cxl_is_security_command(u16 opcode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
		if (security_command_sets[i] == (opcode >> 8))
			return true;
	return false;
}

static void cxl_mem_mbox_timeout(struct cxl_mem *cxlm,
				 struct mbox_cmd *mbox_cmd)
{
	struct device *dev = &cxlm->pdev->dev;

	dev_dbg(dev, "Mailbox command (opcode: %#x size: %zub) timed out\n",
		mbox_cmd->opcode, mbox_cmd->size_in);
}

/**
 * __cxl_mem_mbox_send_cmd() - Execute a mailbox command
 * @cxlm: The CXL memory device to communicate with.
 * @mbox_cmd: Command to send to the memory device.
 *
 * Context: Any context. Expects mbox_mutex to be held.
 * Return: -ETIMEDOUT if timeout occurred waiting for completion. 0 on success.
 *         Caller should check the return code in @mbox_cmd to make sure it
 *         succeeded.
 *
 * This is a generic form of the CXL mailbox send command thus only using the
 * registers defined by the mailbox capability ID - CXL 2.0 8.2.8.4. Memory
 * devices, and perhaps other types of CXL devices may have further information
 * available upon error conditions. Driver facilities wishing to send mailbox
 * commands should use the wrapper command.
 *
 * The CXL spec allows for up to two mailboxes. The intention is for the primary
 * mailbox to be OS controlled and the secondary mailbox to be used by system
 * firmware. This allows the OS and firmware to communicate with the device and
 * not need to coordinate with each other. The driver only uses the primary
 * mailbox.
 */
static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm,
				   struct mbox_cmd *mbox_cmd)
{
	void __iomem *payload = cxlm->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
	u64 cmd_reg, status_reg;
	size_t out_len;
	int rc;

	lockdep_assert_held(&cxlm->mbox_mutex);

	/*
	 * Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
	 *   1. Caller reads MB Control Register to verify doorbell is clear
	 *   2. Caller writes Command Register
	 *   3. Caller writes Command Payload Registers if input payload is non-empty
	 *   4. Caller writes MB Control Register to set doorbell
	 *   5. Caller either polls for doorbell to be clear or waits for interrupt if configured
	 *   6. Caller reads MB Status Register to fetch Return code
	 *   7. If command successful, Caller reads Command Register to get Payload Length
	 *   8. If output payload is non-empty, host reads Command Payload Registers
	 *
	 * Hardware is free to do whatever it wants before the doorbell is rung,
	 * and isn't allowed to change anything after it clears the doorbell. As
	 * such, steps 2 and 3 can happen in any order, and steps 6, 7, 8 can
	 * also happen in any order (though some orders might not make sense).
	 */

	/* #1 */
	if (cxl_doorbell_busy(cxlm)) {
		dev_err_ratelimited(&cxlm->pdev->dev,
				    "Mailbox re-busy after acquiring\n");
		return -EBUSY;
	}

	cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
			     mbox_cmd->opcode);
	if (mbox_cmd->size_in) {
		if (WARN_ON(!mbox_cmd->payload_in))
			return -EINVAL;

		cmd_reg |= FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK,
				      mbox_cmd->size_in);
		memcpy_toio(payload, mbox_cmd->payload_in, mbox_cmd->size_in);
	}

	/* #2, #3 */
	writeq(cmd_reg, cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);

	/* #4 */
	dev_dbg(&cxlm->pdev->dev, "Sending command\n");
	writel(CXLDEV_MBOX_CTRL_DOORBELL,
	       cxlm->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);

	/* #5 */
	rc = cxl_mem_wait_for_doorbell(cxlm);
	if (rc == -ETIMEDOUT) {
		cxl_mem_mbox_timeout(cxlm, mbox_cmd);
		return rc;
	}

	/* #6 */
	status_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET);
	mbox_cmd->return_code =
		FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);

	if (mbox_cmd->return_code != 0) {
		dev_dbg(&cxlm->pdev->dev, "Mailbox operation had an error\n");
		return 0;
	}

	/* #7 */
	cmd_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
	out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);

	/* #8 */
	if (out_len && mbox_cmd->payload_out) {
		/*
		 * Sanitize the copy. If hardware misbehaves, out_len per the
		 * spec can actually be greater than the max allowed size (21
		 * bits available but spec defined 1M max). The caller also may
		 * have requested less data than the hardware supplied even
		 * within spec.
		 */
		size_t n = min3(mbox_cmd->size_out, cxlm->payload_size, out_len);

		memcpy_fromio(mbox_cmd->payload_out, payload, n);
		mbox_cmd->size_out = n;
	} else {
		mbox_cmd->size_out = 0;
	}

	return 0;
}

/**
 * cxl_mem_mbox_get() - Acquire exclusive access to the mailbox.
 * @cxlm: The memory device to gain access to.
 *
 * Context: Any context. Takes the mbox_mutex.
 * Return: 0 if exclusive access was acquired.
 */
static int cxl_mem_mbox_get(struct cxl_mem *cxlm)
{
	struct device *dev = &cxlm->pdev->dev;
	u64 md_status;
	int rc;

	mutex_lock_io(&cxlm->mbox_mutex);

	/*
	 * XXX: There is some amount of ambiguity in the 2.0 version of the spec
	 * around the mailbox interface ready (8.2.8.5.1.1). The purpose of the
	 * bit is to allow firmware running on the device to notify the driver
	 * that it's ready to receive commands. It is unclear if the bit needs
	 * to be read for each mailbox transaction, i.e. the firmware can
	 * switch it on and off as needed. Second, there is no defined timeout
	 * for mailbox ready, like there is for the doorbell interface.
	 *
	 * Assumptions:
	 * 1. The firmware might toggle the Mailbox Interface Ready bit, check
	 *    it for every command.
	 *
	 * 2. If the doorbell is clear, the firmware should have first set the
	 *    Mailbox Interface Ready bit. Therefore, waiting for the doorbell
	 *    to be ready is sufficient.
	 */
	rc = cxl_mem_wait_for_doorbell(cxlm);
	if (rc) {
		dev_warn(dev, "Mailbox interface not ready\n");
		goto out;
	}

	md_status = readq(cxlm->regs.memdev + CXLMDEV_STATUS_OFFSET);
	if (!(md_status & CXLMDEV_MBOX_IF_READY && CXLMDEV_READY(md_status))) {
		dev_err(dev, "mbox: reported doorbell ready, but not mbox ready\n");
		rc = -EBUSY;
		goto out;
	}

	/*
	 * Hardware shouldn't allow a ready status but also have failure bits
	 * set. Spit out an error; this should be a bug report.
	 */
	rc = -EFAULT;
	if (md_status & CXLMDEV_DEV_FATAL) {
		dev_err(dev, "mbox: reported ready, but fatal\n");
		goto out;
	}
	if (md_status & CXLMDEV_FW_HALT) {
		dev_err(dev, "mbox: reported ready, but halted\n");
		goto out;
	}
	if (CXLMDEV_RESET_NEEDED(md_status)) {
		dev_err(dev, "mbox: reported ready, but reset needed\n");
		goto out;
	}

	/* with lock held */
	return 0;

out:
	mutex_unlock(&cxlm->mbox_mutex);
	return rc;
}

/**
 * cxl_mem_mbox_put() - Release exclusive access to the mailbox.
 * @cxlm: The CXL memory device to communicate with.
 *
 * Context: Any context. Expects mbox_mutex to be held.
 */
static void cxl_mem_mbox_put(struct cxl_mem *cxlm)
{
	mutex_unlock(&cxlm->mbox_mutex);
}
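
/*
 * Illustrative only: the expected calling pattern around the helpers above
 * is
 *
 *	rc = cxl_mem_mbox_get(cxlm);
 *	if (rc)
 *		return rc;
 *	rc = __cxl_mem_mbox_send_cmd(cxlm, &mbox_cmd);
 *	cxl_mem_mbox_put(cxlm);
 *
 * as done by handle_mailbox_cmd_from_user() and cxl_mem_mbox_send_cmd()
 * below.
 */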

/**
 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
 * @cxlm: The CXL memory device to communicate with.
 * @cmd: The validated command.
 * @in_payload: Pointer to userspace's input payload.
 * @out_payload: Pointer to userspace's output payload.
 * @size_out: (Input) Max payload size to copy out.
 *            (Output) Payload size hardware generated.
 * @retval: Hardware generated return code from the operation.
 *
 * Return:
 *  * %0	- Mailbox transaction succeeded. This implies the mailbox
 *		  protocol completed successfully, not that the operation
 *		  itself was successful.
 *  * %-ENOMEM	- Couldn't allocate a bounce buffer.
 *  * %-EFAULT	- Something happened with copy_to/from_user.
 *  * %-EINTR	- Mailbox acquisition interrupted.
 *  * %-EXXX	- Transaction level failures.
 *
 * Creates the appropriate mailbox command and dispatches it on behalf of a
 * userspace request. The input and output payloads are copied to and from
 * userspace.
 *
 * See cxl_send_cmd().
 */
static int handle_mailbox_cmd_from_user(struct cxl_mem *cxlm,
					const struct cxl_mem_command *cmd,
					u64 in_payload, u64 out_payload,
					s32 *size_out, u32 *retval)
{
	struct device *dev = &cxlm->pdev->dev;
	struct mbox_cmd mbox_cmd = {
		.opcode = cmd->opcode,
		.size_in = cmd->info.size_in,
		.size_out = cmd->info.size_out,
	};
	int rc;

	if (cmd->info.size_out) {
		mbox_cmd.payload_out = kvzalloc(cmd->info.size_out, GFP_KERNEL);
		if (!mbox_cmd.payload_out)
			return -ENOMEM;
	}

	if (cmd->info.size_in) {
		mbox_cmd.payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
						   cmd->info.size_in);
		if (IS_ERR(mbox_cmd.payload_in)) {
			kvfree(mbox_cmd.payload_out);
			return PTR_ERR(mbox_cmd.payload_in);
		}
	}

	rc = cxl_mem_mbox_get(cxlm);
	if (rc)
		goto out;

	dev_dbg(dev,
		"Submitting %s command for user\n"
		"\topcode: %x\n"
		"\tsize: %ub\n",
		cxl_command_names[cmd->info.id].name, mbox_cmd.opcode,
		cmd->info.size_in);

	dev_WARN_ONCE(dev, cmd->info.id == CXL_MEM_COMMAND_ID_RAW,
		      "raw command path used\n");

	rc = __cxl_mem_mbox_send_cmd(cxlm, &mbox_cmd);
	cxl_mem_mbox_put(cxlm);
	if (rc)
		goto out;

	/*
	 * @size_out contains the max size that's allowed to be written back
	 * out to userspace. While the device may have generated more output
	 * than this, the excess will be ignored.
	 */
	if (mbox_cmd.size_out) {
		dev_WARN_ONCE(dev, mbox_cmd.size_out > *size_out,
			      "Invalid return size\n");
		if (copy_to_user(u64_to_user_ptr(out_payload),
				 mbox_cmd.payload_out, mbox_cmd.size_out)) {
			rc = -EFAULT;
			goto out;
		}
	}

	*size_out = mbox_cmd.size_out;
	*retval = mbox_cmd.return_code;

out:
	kvfree(mbox_cmd.payload_in);
	kvfree(mbox_cmd.payload_out);
	return rc;
}

static bool cxl_mem_raw_command_allowed(u16 opcode)
{
	int i;

	if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
		return false;

	if (security_locked_down(LOCKDOWN_PCI_ACCESS))
		return false;

	if (cxl_raw_allow_all)
		return true;

	if (cxl_is_security_command(opcode))
		return false;

	for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
		if (cxl_disabled_raw_commands[i] == opcode)
			return false;

	return true;
}

/**
 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
 * @cxlm: &struct cxl_mem device whose mailbox will be used.
 * @send_cmd: &struct cxl_send_command copied in from userspace.
 * @out_cmd: Sanitized and populated &struct cxl_mem_command.
 *
 * Return:
 *  * %0	- @out_cmd is ready to send.
 *  * %-ENOTTY	- Invalid command specified.
 *  * %-EINVAL	- Reserved fields or invalid values were used.
 *  * %-ENOMEM	- Input or output buffer wasn't sized properly.
 *  * %-EPERM	- Attempted to use a protected command.
 *
 * The result of this command is a fully validated command in @out_cmd that is
 * safe to send to the hardware.
 *
 * See handle_mailbox_cmd_from_user()
 */
static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm,
				      const struct cxl_send_command *send_cmd,
				      struct cxl_mem_command *out_cmd)
{
	const struct cxl_command_info *info;
	struct cxl_mem_command *c;

	if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
		return -ENOTTY;

	/*
	 * The user can never specify an input payload larger than what
	 * hardware supports, but output can be arbitrarily large (simply write
	 * out as much data as the hardware provides).
	 */
	if (send_cmd->in.size > cxlm->payload_size)
		return -EINVAL;

	/*
	 * Checks are bypassed for raw commands, but a WARN/taint will occur
	 * later in the callchain.
	 */
	if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW) {
		const struct cxl_mem_command temp = {
			.info = {
				.id = CXL_MEM_COMMAND_ID_RAW,
				.flags = 0,
				.size_in = send_cmd->in.size,
				.size_out = send_cmd->out.size,
			},
			.opcode = send_cmd->raw.opcode
		};

		if (send_cmd->raw.rsvd)
			return -EINVAL;

		/*
		 * Unlike supported commands, the output size of RAW commands
		 * gets passed along without further checking, so it must be
		 * validated here.
		 */
		if (send_cmd->out.size > cxlm->payload_size)
			return -EINVAL;

		if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
			return -EPERM;

		memcpy(out_cmd, &temp, sizeof(temp));

		return 0;
	}

	if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
		return -EINVAL;

	if (send_cmd->rsvd)
		return -EINVAL;

	if (send_cmd->in.rsvd || send_cmd->out.rsvd)
		return -EINVAL;

	/* Convert user's command into the internal representation */
	c = &mem_commands[send_cmd->id];
	info = &c->info;

	/* Check that the command is enabled for hardware */
	if (!test_bit(info->id, cxlm->enabled_cmds))
		return -ENOTTY;

	/* Check the input buffer is the expected size */
	if (info->size_in >= 0 && info->size_in != send_cmd->in.size)
		return -ENOMEM;

	/* Check the output buffer is at least large enough */
	if (info->size_out >= 0 && send_cmd->out.size < info->size_out)
		return -ENOMEM;

	memcpy(out_cmd, c, sizeof(*c));
	out_cmd->info.size_in = send_cmd->in.size;
	/*
	 * XXX: out_cmd->info.size_out will be controlled by the driver, and
	 * the specified number of bytes @send_cmd->out.size will be copied
	 * back out to userspace.
	 */

	return 0;
}

static int cxl_query_cmd(struct cxl_memdev *cxlmd,
			 struct cxl_mem_query_commands __user *q)
{
	struct device *dev = &cxlmd->dev;
	struct cxl_mem_command *cmd;
	u32 n_commands;
	int j = 0;

	dev_dbg(dev, "Query IOCTL\n");

	if (get_user(n_commands, &q->n_commands))
		return -EFAULT;

	/* returns the total number if 0 elements are requested. */
	if (n_commands == 0)
		return put_user(cxl_cmd_count, &q->n_commands);

	/*
	 * otherwise, return min(n_commands, total commands) cxl_command_info
	 * structures.
	 */
	cxl_for_each_cmd(cmd) {
		const struct cxl_command_info *info = &cmd->info;

		if (copy_to_user(&q->commands[j++], info, sizeof(*info)))
			return -EFAULT;

		if (j == n_commands)
			break;
	}

	return 0;
}

static int cxl_send_cmd(struct cxl_memdev *cxlmd,
			struct cxl_send_command __user *s)
{
	struct cxl_mem *cxlm = cxlmd->cxlm;
	struct device *dev = &cxlmd->dev;
	struct cxl_send_command send;
	struct cxl_mem_command c;
	int rc;

	dev_dbg(dev, "Send IOCTL\n");

	if (copy_from_user(&send, s, sizeof(send)))
		return -EFAULT;

	rc = cxl_validate_cmd_from_user(cxlmd->cxlm, &send, &c);
	if (rc)
		return rc;

	/* Prepare to handle a full payload for variable sized output */
	if (c.info.size_out < 0)
		c.info.size_out = cxlm->payload_size;

	rc = handle_mailbox_cmd_from_user(cxlm, &c, send.in.payload,
					  send.out.payload, &send.out.size,
					  &send.retval);
	if (rc)
		return rc;

	if (copy_to_user(s, &send, sizeof(send)))
		return -EFAULT;

	return 0;
}

static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
			       unsigned long arg)
{
	switch (cmd) {
	case CXL_MEM_QUERY_COMMANDS:
		return cxl_query_cmd(cxlmd, (void __user *)arg);
	case CXL_MEM_SEND_COMMAND:
		return cxl_send_cmd(cxlmd, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}
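
/*
 * A minimal userspace sketch (illustrative only, not part of the driver) of
 * driving the query ioctl above; assumes a /dev/cxl/memX char device and the
 * UAPI definitions from <linux/cxl_mem.h>:
 *
 *	struct cxl_mem_query_commands *q;
 *	int fd = open("/dev/cxl/mem0", O_RDWR);
 *	__u32 n;
 *
 *	q = calloc(1, sizeof(*q));
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, q);	// n_commands == 0: get total
 *	n = q->n_commands;
 *	free(q);
 *	q = calloc(1, sizeof(*q) + n * sizeof(q->commands[0]));
 *	q->n_commands = n;
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, q);	// fills q->commands[]
 */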

static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct cxl_memdev *cxlmd = file->private_data;
	int rc = -ENXIO;

	down_read(&cxl_memdev_rwsem);
	if (cxlmd->cxlm)
		rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
	up_read(&cxl_memdev_rwsem);

	return rc;
}

static int cxl_memdev_open(struct inode *inode, struct file *file)
{
	struct cxl_memdev *cxlmd =
		container_of(inode->i_cdev, typeof(*cxlmd), cdev);

	get_device(&cxlmd->dev);
	file->private_data = cxlmd;

	return 0;
}

static int cxl_memdev_release_file(struct inode *inode, struct file *file)
{
	struct cxl_memdev *cxlmd =
		container_of(inode->i_cdev, typeof(*cxlmd), cdev);

	put_device(&cxlmd->dev);

	return 0;
}

static void cxl_memdev_shutdown(struct device *dev)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);

	down_write(&cxl_memdev_rwsem);
	cxlmd->cxlm = NULL;
	up_write(&cxl_memdev_rwsem);
}

static const struct cdevm_file_operations cxl_memdev_fops = {
	.fops = {
		.owner = THIS_MODULE,
		.unlocked_ioctl = cxl_memdev_ioctl,
		.open = cxl_memdev_open,
		.release = cxl_memdev_release_file,
		.compat_ioctl = compat_ptr_ioctl,
		.llseek = noop_llseek,
	},
	.shutdown = cxl_memdev_shutdown,
};

static inline struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
{
	struct cxl_mem_command *c;

	cxl_for_each_cmd(c)
		if (c->opcode == opcode)
			return c;

	return NULL;
}

/**
 * cxl_mem_mbox_send_cmd() - Send a mailbox command to a memory device.
 * @cxlm: The CXL memory device to communicate with.
 * @opcode: Opcode for the mailbox command.
 * @in: The input payload for the mailbox command.
 * @in_size: The length of the input payload
 * @out: Caller allocated buffer for the output.
 * @out_size: Expected size of output.
 *
 * Context: Any context. Will acquire and release mbox_mutex.
 * Return:
 *  * %>=0	- Number of bytes returned in @out.
 *  * %-E2BIG	- Payload is too large for hardware.
 *  * %-EBUSY	- Couldn't acquire exclusive mailbox access.
 *  * %-EFAULT	- Hardware error occurred.
 *  * %-ENXIO	- Command completed, but device reported an error.
 *  * %-EIO	- Unexpected output size.
 *
 * A mailbox command may complete successfully at the transport level while
 * the device itself reports an error. That distinction is useful for commands
 * issued from userspace, but the kernel can only consume the results when
 * both succeed.
 *
 * See __cxl_mem_mbox_send_cmd()
 */
static int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode,
				 void *in, size_t in_size,
				 void *out, size_t out_size)
{
	const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
	struct mbox_cmd mbox_cmd = {
		.opcode = opcode,
		.payload_in = in,
		.size_in = in_size,
		.size_out = out_size,
		.payload_out = out,
	};
	int rc;

	if (out_size > cxlm->payload_size)
		return -E2BIG;

	rc = cxl_mem_mbox_get(cxlm);
	if (rc)
		return rc;

	rc = __cxl_mem_mbox_send_cmd(cxlm, &mbox_cmd);
	cxl_mem_mbox_put(cxlm);
	if (rc)
		return rc;

	/* TODO: Map return code to proper kernel style errno */
	if (mbox_cmd.return_code != CXL_MBOX_SUCCESS)
		return -ENXIO;

	/*
	 * Variable sized commands can't be validated and so it's up to the
	 * caller to do that if they wish.
	 */
	if (cmd->info.size_out >= 0 && mbox_cmd.size_out != out_size)
		return -EIO;

	return 0;
}
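
/*
 * Example (a minimal sketch of an in-kernel caller, with an assumed output
 * layout): a fixed-size command with no input payload would be issued
 * against the wrapper above as:
 *
 *	struct cxl_mbox_health_info info;	// hypothetical output struct
 *	int rc;
 *
 *	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_HEALTH_INFO,
 *				   NULL, 0, &info, sizeof(info));
 *
 * On success @info holds exactly sizeof(info) bytes; any size mismatch on a
 * fixed-size command is reported as -EIO by the check above.
 */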

static int cxl_mem_setup_mailbox(struct cxl_mem *cxlm)
{
	const int cap = readl(cxlm->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);

	cxlm->payload_size =
		1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);

	/*
	 * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register
	 *
	 * If the size is too small, mandatory commands will not work and so
	 * there's no point in going forward. If the size is too large, there's
	 * no harm in soft limiting it.
	 */
	cxlm->payload_size = min_t(size_t, cxlm->payload_size, SZ_1M);
	if (cxlm->payload_size < 256) {
		dev_err(&cxlm->pdev->dev, "Mailbox is too small (%zub)",
			cxlm->payload_size);
		return -ENXIO;
	}

	dev_dbg(&cxlm->pdev->dev, "Mailbox payload sized %zu",
		cxlm->payload_size);

	return 0;
}
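
/*
 * Worked example of the math above: the Payload Size field is an exponent,
 * so a field value of 8 yields 1 << 8 = 256 bytes (the spec minimum), and a
 * value of 20 yields 1 << 20 = 1MB, which is also where the soft limit caps
 * larger advertisements.
 */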

static struct cxl_mem *cxl_mem_create(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct cxl_mem *cxlm;

	cxlm = devm_kzalloc(dev, sizeof(*cxlm), GFP_KERNEL);
	if (!cxlm) {
		dev_err(dev, "No memory available\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_init(&cxlm->mbox_mutex);
	cxlm->pdev = pdev;
	cxlm->enabled_cmds =
		devm_kmalloc_array(dev, BITS_TO_LONGS(cxl_cmd_count),
				   sizeof(unsigned long),
				   GFP_KERNEL | __GFP_ZERO);
	if (!cxlm->enabled_cmds) {
		dev_err(dev, "No memory available for bitmap\n");
		return ERR_PTR(-ENOMEM);
	}

	return cxlm;
}

static void __iomem *cxl_mem_map_regblock(struct cxl_mem *cxlm,
					  u8 bar, u64 offset)
{
	struct pci_dev *pdev = cxlm->pdev;
	struct device *dev = &pdev->dev;
	void __iomem *addr;

	/* Basic sanity check that BAR is big enough */
	if (pci_resource_len(pdev, bar) < offset) {
		dev_err(dev, "BAR%d: %pr: too small (offset: %#llx)\n", bar,
			&pdev->resource[bar], (unsigned long long)offset);
		return IOMEM_ERR_PTR(-ENXIO);
	}

	addr = pci_iomap(pdev, bar, 0);
	if (!addr) {
		dev_err(dev, "failed to map registers\n");
		return addr;
	}

	dev_dbg(dev, "Mapped CXL Memory Device resource bar %u @ %#llx\n",
		bar, offset);

	return addr;
}

static void cxl_mem_unmap_regblock(struct cxl_mem *cxlm, void __iomem *base)
{
	pci_iounmap(cxlm->pdev, base);
}

static int cxl_mem_dvsec(struct pci_dev *pdev, int dvsec)
{
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DVSEC);
	if (!pos)
		return 0;

	while (pos) {
		u16 vendor, id;

		pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vendor);
		pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2, &id);
		if (vendor == PCI_DVSEC_VENDOR_ID_CXL && dvsec == id)
			return pos;

		pos = pci_find_next_ext_capability(pdev, pos,
						   PCI_EXT_CAP_ID_DVSEC);
	}

	return 0;
}

static int cxl_probe_regs(struct cxl_mem *cxlm, void __iomem *base,
			  struct cxl_register_map *map)
{
	struct pci_dev *pdev = cxlm->pdev;
	struct device *dev = &pdev->dev;
	struct cxl_component_reg_map *comp_map;
	struct cxl_device_reg_map *dev_map;

	switch (map->reg_type) {
	case CXL_REGLOC_RBI_COMPONENT:
		comp_map = &map->component_map;
		cxl_probe_component_regs(dev, base, comp_map);
		if (!comp_map->hdm_decoder.valid) {
			dev_err(dev, "HDM decoder registers not found\n");
			return -ENXIO;
		}

		dev_dbg(dev, "Set up component registers\n");
		break;
	case CXL_REGLOC_RBI_MEMDEV:
		dev_map = &map->device_map;
		cxl_probe_device_regs(dev, base, dev_map);
		if (!dev_map->status.valid || !dev_map->mbox.valid ||
		    !dev_map->memdev.valid) {
			dev_err(dev, "registers not found: %s%s%s\n",
				!dev_map->status.valid ? "status " : "",
				!dev_map->mbox.valid ? "mbox " : "",
				!dev_map->memdev.valid ? "memdev " : "");
			return -ENXIO;
		}

		dev_dbg(dev, "Probing device registers...\n");
		break;
	default:
		break;
	}

	return 0;
}

static int cxl_map_regs(struct cxl_mem *cxlm, struct cxl_register_map *map)
{
	struct pci_dev *pdev = cxlm->pdev;
	struct device *dev = &pdev->dev;

	switch (map->reg_type) {
	case CXL_REGLOC_RBI_COMPONENT:
		cxl_map_component_regs(pdev, &cxlm->regs.component, map);
		dev_dbg(dev, "Mapping component registers...\n");
		break;
	case CXL_REGLOC_RBI_MEMDEV:
		cxl_map_device_regs(pdev, &cxlm->regs.device_regs, map);
		dev_dbg(dev, "Mapping device registers...\n");
		break;
	default:
		break;
	}

	return 0;
}

static void cxl_decode_register_block(u32 reg_lo, u32 reg_hi,
				      u8 *bar, u64 *offset, u8 *reg_type)
{
	*offset = ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK);
	*bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo);
	*reg_type = FIELD_GET(CXL_REGLOC_RBI_MASK, reg_lo);
}
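
/*
 * Worked example (values assumed for illustration): per the CXL 2.0 Register
 * Locator layout, a low dword of reg_lo = 0x00010300 with reg_hi = 0 decodes
 * as bar = 0 (BIR field), reg_type = 3 (CXL_REGLOC_RBI_MEMDEV) and
 * offset = 0x10000, since CXL_REGLOC_ADDR_MASK keeps only the 64K-aligned
 * offset bits of the low dword.
 */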

/**
 * cxl_mem_setup_regs() - Setup necessary MMIO.
 * @cxlm: The CXL memory device to communicate with.
 *
 * Return: 0 if all necessary registers mapped.
 *
 * A memory device is required by spec to implement a certain set of MMIO
 * regions. The purpose of this function is to enumerate and map those
 * registers.
 */
static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
{
	struct pci_dev *pdev = cxlm->pdev;
	struct device *dev = &pdev->dev;
	u32 regloc_size, regblocks;
	void __iomem *base;
	int regloc, i, n_maps;
	struct cxl_register_map *map, maps[CXL_REGLOC_RBI_TYPES];
	int ret = 0;

	regloc = cxl_mem_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID);
	if (!regloc) {
		dev_err(dev, "register location dvsec not found\n");
		return -ENXIO;
	}

	if (pci_request_mem_regions(pdev, pci_name(pdev)))
		return -ENODEV;

	/* Get the size of the Register Locator DVSEC */
	pci_read_config_dword(pdev, regloc + PCI_DVSEC_HEADER1, &regloc_size);
	regloc_size = FIELD_GET(PCI_DVSEC_HEADER1_LENGTH_MASK, regloc_size);

	regloc += PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET;
	regblocks = (regloc_size - PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET) / 8;

	for (i = 0, n_maps = 0; i < regblocks; i++, regloc += 8) {
		u32 reg_lo, reg_hi;
		u8 reg_type;
		u64 offset;
		u8 bar;

		pci_read_config_dword(pdev, regloc, &reg_lo);
		pci_read_config_dword(pdev, regloc + 4, &reg_hi);

		cxl_decode_register_block(reg_lo, reg_hi, &bar, &offset,
					  &reg_type);

		dev_dbg(dev, "Found register block in bar %u @ 0x%llx of type %u\n",
			bar, offset, reg_type);

		/* Ignore unknown register block types */
		if (reg_type > CXL_REGLOC_RBI_MEMDEV)
			continue;

		base = cxl_mem_map_regblock(cxlm, bar, offset);
		if (!base)
			return -ENOMEM;

		map = &maps[n_maps];
		map->barno = bar;
		map->block_offset = offset;
		map->reg_type = reg_type;

		ret = cxl_probe_regs(cxlm, base + offset, map);

		/* Always unmap the regblock regardless of probe success */
		cxl_mem_unmap_regblock(cxlm, base);

		if (ret)
			return ret;

		n_maps++;
	}

	pci_release_mem_regions(pdev);

	for (i = 0; i < n_maps; i++) {
		ret = cxl_map_regs(cxlm, &maps[i]);
		if (ret)
			break;
	}

	return ret;
}

static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out)
{
	u32 remaining = size;
	u32 offset = 0;

	while (remaining) {
		u32 xfer_size = min_t(u32, remaining, cxlm->payload_size);
		struct cxl_mbox_get_log {
			uuid_t uuid;
			__le32 offset;
			__le32 length;
		} __packed log = {
			.uuid = *uuid,
			.offset = cpu_to_le32(offset),
			.length = cpu_to_le32(xfer_size)
		};
		int rc;

		rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LOG, &log,
					   sizeof(log), out, xfer_size);
		if (rc < 0)
			return rc;

		out += xfer_size;
		remaining -= xfer_size;
		offset += xfer_size;
	}

	return 0;
}
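
/*
 * Example of the chunking math above: a 96KB Command Effects Log read
 * through a 16KB mailbox payload takes six GET_LOG round trips, with
 * @offset advancing 0x4000 per iteration; a 1MB payload (the soft limit)
 * would fetch the same log in a single transfer.
 */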

/**
 * cxl_walk_cel() - Walk through the Command Effects Log.
 * @cxlm: Device.
 * @size: Length of the Command Effects Log.
 * @cel: Buffer containing the Command Effects Log.
 *
 * Iterate over each entry in the CEL and determine if the driver supports the
 * command. If so, the command is enabled for the device and can be used later.
 */
static void cxl_walk_cel(struct cxl_mem *cxlm, size_t size, u8 *cel)
{
	struct cel_entry {
		__le16 opcode;
		__le16 effect;
	} __packed * cel_entry;
	const int cel_entries = size / sizeof(*cel_entry);
	int i;

	cel_entry = (struct cel_entry *)cel;

	for (i = 0; i < cel_entries; i++) {
		u16 opcode = le16_to_cpu(cel_entry[i].opcode);
		struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);

		if (!cmd) {
			dev_dbg(&cxlm->pdev->dev,
				"Opcode 0x%04x unsupported by driver", opcode);
			continue;
		}

		set_bit(cmd->info.id, cxlm->enabled_cmds);
	}
}

struct cxl_mbox_get_supported_logs {
	__le16 entries;
	u8 rsvd[6];
	struct gsl_entry {
		uuid_t uuid;
		__le32 size;
	} __packed entry[];
} __packed;

static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm)
{
	struct cxl_mbox_get_supported_logs *ret;
	int rc;

	ret = kvmalloc(cxlm->payload_size, GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL,
				   0, ret, cxlm->payload_size);
	if (rc < 0) {
		kvfree(ret);
		return ERR_PTR(rc);
	}

	return ret;
}

/**
 * cxl_mem_get_partition_info - Get partition info
 * @cxlm: The device to act on
 * @active_volatile_bytes: returned active volatile capacity
 * @active_persistent_bytes: returned active persistent capacity
 * @next_volatile_bytes: returned next volatile capacity
 * @next_persistent_bytes: returned next persistent capacity
 *
 * Retrieve the current partition info for the device specified. If not 0, the
 * 'next' values are pending and take effect on the next cold reset.
 *
 * Return: 0 if no error; otherwise the result of the mailbox command.
 *
 * See CXL @8.2.9.5.2.1 Get Partition Info
 */
static int cxl_mem_get_partition_info(struct cxl_mem *cxlm,
				      u64 *active_volatile_bytes,
				      u64 *active_persistent_bytes,
				      u64 *next_volatile_bytes,
				      u64 *next_persistent_bytes)
{
	struct cxl_mbox_get_partition_info {
		__le64 active_volatile_cap;
		__le64 active_persistent_cap;
		__le64 next_volatile_cap;
		__le64 next_persistent_cap;
	} __packed pi;
	int rc;

	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_PARTITION_INFO,
				   NULL, 0, &pi, sizeof(pi));
	if (rc)
		return rc;

	*active_volatile_bytes = le64_to_cpu(pi.active_volatile_cap);
	*active_persistent_bytes = le64_to_cpu(pi.active_persistent_cap);
	*next_volatile_bytes = le64_to_cpu(pi.next_volatile_cap);
	*next_persistent_bytes = le64_to_cpu(pi.next_persistent_cap);

	*active_volatile_bytes *= CXL_CAPACITY_MULTIPLIER;
	*active_persistent_bytes *= CXL_CAPACITY_MULTIPLIER;
	*next_volatile_bytes *= CXL_CAPACITY_MULTIPLIER;
	*next_persistent_bytes *= CXL_CAPACITY_MULTIPLIER;

	return 0;
}
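
/*
 * Example: the mailbox reports capacities in CXL_CAPACITY_MULTIPLIER units
 * (256MB granularity per the CXL 2.0 spec), so an active_volatile_cap of 4
 * from the device scales to 4 * 256MB = 1GB of usable volatile capacity.
 */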

/**
 * cxl_mem_enumerate_cmds() - Enumerate commands for a device.
 * @cxlm: The device.
 *
 * Returns 0 if enumerate completed successfully.
 *
 * CXL devices have optional support for certain commands. This function will
 * determine the set of supported commands for the hardware and update the
 * enabled_cmds bitmap in the @cxlm.
 */
static int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm)
{
	struct cxl_mbox_get_supported_logs *gsl;
	struct device *dev = &cxlm->pdev->dev;
	struct cxl_mem_command *cmd;
	int i, rc;

	gsl = cxl_get_gsl(cxlm);
	if (IS_ERR(gsl))
		return PTR_ERR(gsl);

	rc = -ENOENT;
	for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
		u32 size = le32_to_cpu(gsl->entry[i].size);
		uuid_t uuid = gsl->entry[i].uuid;
		u8 *log;

		dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);

		if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
			continue;

		log = kvmalloc(size, GFP_KERNEL);
		if (!log) {
			rc = -ENOMEM;
			goto out;
		}

		rc = cxl_xfer_log(cxlm, &uuid, size, log);
		if (rc) {
			kvfree(log);
			goto out;
		}

		cxl_walk_cel(cxlm, size, log);
		kvfree(log);

		/* In case CEL was bogus, enable some default commands. */
		cxl_for_each_cmd(cmd)
			if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
				set_bit(cmd->info.id, cxlm->enabled_cmds);

		/* Found the required CEL */
		rc = 0;
	}

out:
	kvfree(gsl);
	return rc;
}

/**
 * cxl_mem_identify() - Send the IDENTIFY command to the device.
 * @cxlm: The device to identify.
 *
 * Return: 0 if identify was executed successfully.
 *
 * This will dispatch the identify command to the device and on success populate
 * structures to be exported to sysfs.
 */
static int cxl_mem_identify(struct cxl_mem *cxlm)
{
	/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
	struct cxl_mbox_identify {
		char fw_revision[0x10];
		__le64 total_capacity;
		__le64 volatile_capacity;
		__le64 persistent_capacity;
		__le64 partition_align;
		__le16 info_event_log_size;
		__le16 warning_event_log_size;
		__le16 failure_event_log_size;
		__le16 fatal_event_log_size;
		__le32 lsa_size;
		u8 poison_list_max_mer[3];
		__le16 inject_poison_limit;
		u8 poison_caps;
		u8 qos_telemetry_caps;
	} __packed id;
	int rc;

	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id,
				   sizeof(id));
	if (rc < 0)
		return rc;

	cxlm->total_bytes = le64_to_cpu(id.total_capacity);
	cxlm->total_bytes *= CXL_CAPACITY_MULTIPLIER;

	cxlm->volatile_only_bytes = le64_to_cpu(id.volatile_capacity);
	cxlm->volatile_only_bytes *= CXL_CAPACITY_MULTIPLIER;

	cxlm->persistent_only_bytes = le64_to_cpu(id.persistent_capacity);
	cxlm->persistent_only_bytes *= CXL_CAPACITY_MULTIPLIER;

	cxlm->partition_align_bytes = le64_to_cpu(id.partition_align);
	cxlm->partition_align_bytes *= CXL_CAPACITY_MULTIPLIER;

	dev_dbg(&cxlm->pdev->dev, "Identify Memory Device\n"
		"     total_bytes = %#llx\n"
		"     volatile_only_bytes = %#llx\n"
		"     persistent_only_bytes = %#llx\n"
		"     partition_align_bytes = %#llx\n",
		cxlm->total_bytes,
		cxlm->volatile_only_bytes,
		cxlm->persistent_only_bytes,
		cxlm->partition_align_bytes);

	cxlm->lsa_size = le32_to_cpu(id.lsa_size);
	memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));

	return 0;
}

static int cxl_mem_create_range_info(struct cxl_mem *cxlm)
{
	int rc;

	if (cxlm->partition_align_bytes == 0) {
		cxlm->ram_range.start = 0;
		cxlm->ram_range.end = cxlm->volatile_only_bytes - 1;
		cxlm->pmem_range.start = cxlm->volatile_only_bytes;
		cxlm->pmem_range.end = cxlm->volatile_only_bytes +
				       cxlm->persistent_only_bytes - 1;
		return 0;
	}

	rc = cxl_mem_get_partition_info(cxlm,
					&cxlm->active_volatile_bytes,
					&cxlm->active_persistent_bytes,
					&cxlm->next_volatile_bytes,
					&cxlm->next_persistent_bytes);
	if (rc < 0) {
		dev_err(&cxlm->pdev->dev, "Failed to query partition information\n");
		return rc;
	}

	dev_dbg(&cxlm->pdev->dev, "Get Partition Info\n"
		"     active_volatile_bytes = %#llx\n"
		"     active_persistent_bytes = %#llx\n"
		"     next_volatile_bytes = %#llx\n"
		"     next_persistent_bytes = %#llx\n",
		cxlm->active_volatile_bytes,
		cxlm->active_persistent_bytes,
		cxlm->next_volatile_bytes,
		cxlm->next_persistent_bytes);

	cxlm->ram_range.start = 0;
	cxlm->ram_range.end = cxlm->active_volatile_bytes - 1;

	cxlm->pmem_range.start = cxlm->active_volatile_bytes;
	cxlm->pmem_range.end = cxlm->active_volatile_bytes +
			       cxlm->active_persistent_bytes - 1;

	return 0;
}
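
/*
 * Example layout: for a partitionable device reporting 1GB active volatile
 * and 1GB active persistent capacity, the code above yields
 * ram_range = [0, 0x3fffffff] and pmem_range = [0x40000000, 0x7fffffff],
 * i.e. the persistent partition always begins where the volatile one ends.
 */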

static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct cxl_memdev *cxlmd;
	struct cxl_mem *cxlm;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	cxlm = cxl_mem_create(pdev);
	if (IS_ERR(cxlm))
		return PTR_ERR(cxlm);

	rc = cxl_mem_setup_regs(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_setup_mailbox(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_enumerate_cmds(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_identify(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_create_range_info(cxlm);
	if (rc)
		return rc;

	cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlm, &cxl_memdev_fops);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	if (range_len(&cxlm->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
		rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd);

	return rc;
}

static const struct pci_device_id cxl_mem_pci_tbl[] = {
	/* PCI class code for CXL.mem Type-3 Devices */
	{ PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)},
	{ /* terminate list */ },
};
MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);
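
/*
 * With the usual header values (PCI_CLASS_MEMORY_CXL = 0x0502 and
 * CXL_MEMORY_PROGIF = 0x10), the class match above expands to 0x050210:
 * base class 0x05 "memory controller", sub-class 0x02 "CXL", programming
 * interface 0x10, with ~0 masking in all 24 class-code bits.
 */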

static struct pci_driver cxl_mem_driver = {
	.name			= KBUILD_MODNAME,
	.id_table		= cxl_mem_pci_tbl,
	.probe			= cxl_mem_probe,
	.driver = {
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
	},
};

static __init int cxl_mem_init(void)
{
	struct dentry *mbox_debugfs;
	int rc;

	/* Double check the anonymous union trickery in struct cxl_regs */
	BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) !=
		     offsetof(struct cxl_regs, device_regs.memdev));

	rc = pci_register_driver(&cxl_mem_driver);
	if (rc)
		return rc;

	cxl_debugfs = debugfs_create_dir("cxl", NULL);
	mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs);
	debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
			    &cxl_raw_allow_all);

	return 0;
}

static __exit void cxl_mem_exit(void)
{
	debugfs_remove_recursive(cxl_debugfs);
	pci_unregister_driver(&cxl_mem_driver);
}

MODULE_LICENSE("GPL v2");
module_init(cxl_mem_init);
module_exit(cxl_mem_exit);
MODULE_IMPORT_NS(CXL);