// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "cxlmem.h"
#include "cxlpci.h"

/**
 * DOC: cxl port
 *
 * The port driver enumerates dports via PCI and scans for HDM
 * (Host-managed-Device-Memory) decoder resources via the
 * @component_reg_phys value passed in by the agent that registered the
 * port. All descendant ports of a CXL root port (described by platform
 * firmware) are managed in this driver's context. Each driver instance
 * is responsible for tearing down the driver context of immediate
 * descendant ports. The locking for this is validated by
 * CONFIG_PROVE_CXL_LOCKING.
 *
 * The primary service this driver provides is presenting APIs to other
 * drivers to utilize the decoders, and indicating to userspace (via bind
 * status) the connectivity of the CXL.mem protocol throughout the
 * PCIe topology.
 */
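
/*
 * Illustration (device names vary by topology; the paths below are an
 * assumption based on cxl bus naming conventions, not taken from this
 * file): userspace can infer CXL.mem connectivity from whether a port
 * device is bound to this driver:
 *
 *	$ readlink /sys/bus/cxl/devices/port1/driver
 *	.../drivers/cxl_port
 */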
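
/* devm action trampoline: schedule memdev detach when the port goes away */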
static void schedule_detach(void *cxlmd)
{
	schedule_cxl_memdev_detach(cxlmd);
}
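
/*
 * device_for_each_child() callback: try to attach an enabled,
 * firmware-committed (AUTO state) endpoint decoder to a region under @root.
 */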
static int discover_region(struct device *dev, void *root)
{
	struct cxl_endpoint_decoder *cxled;
	int rc;

	if (!is_endpoint_decoder(dev))
		return 0;

	cxled = to_cxl_endpoint_decoder(dev);
	if ((cxled->cxld.flags & CXL_DECODER_F_ENABLE) == 0)
		return 0;

	if (cxled->state != CXL_DECODER_STATE_AUTO)
		return 0;

	/*
	 * Region enumeration is opportunistic; if this add-event fails,
	 * continue to the next endpoint decoder.
	 */
	rc = cxl_add_to_region(root, cxled);
	if (rc)
		dev_dbg(dev, "failed to add to region: %#llx-%#llx\n",
			cxled->cxld.hpa_range.start, cxled->cxld.hpa_range.end);

	return 0;
}
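
/*
 * Switch path: enumerate dports, then map the HDM decoder capability. If
 * the capability is absent (-ENODEV) but exactly one dport was found
 * (rc == 1), all traffic routes to that dport and a passthrough decoder
 * stands in for real HDM decoders.
 */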
static int cxl_switch_port_probe(struct cxl_port *port)
{
	struct cxl_hdm *cxlhdm;
	int rc;

	rc = devm_cxl_port_enumerate_dports(port);
	if (rc < 0)
		return rc;

	cxlhdm = devm_cxl_setup_hdm(port, NULL);
	if (!IS_ERR(cxlhdm))
		return devm_cxl_enumerate_decoders(cxlhdm, NULL);

	if (PTR_ERR(cxlhdm) != -ENODEV) {
		dev_err(&port->dev, "Failed to map HDM decoder capability\n");
		return PTR_ERR(cxlhdm);
	}

	if (rc == 1) {
		dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
		return devm_cxl_add_passthrough_decoder(port);
	}

	dev_err(&port->dev, "HDM decoder capability not found\n");
	return -ENXIO;
}
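
/*
 * Endpoint path: decode the range-register DVSEC, cache CDAT data before
 * decoder enumeration so is_visible() works, pin the memdev for the
 * lifetime of the port, and finally assemble regions from decoders that
 * firmware committed before boot.
 */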
static int cxl_endpoint_port_probe(struct cxl_port *port)
{
	struct cxl_endpoint_dvsec_info info = { .port = port };
	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_hdm *cxlhdm;
	struct cxl_port *root;
	int rc;

	rc = cxl_dvsec_rr_decode(cxlds->dev, cxlds->cxl_dvsec, &info);
	if (rc < 0)
		return rc;

	cxlhdm = devm_cxl_setup_hdm(port, &info);
	if (IS_ERR(cxlhdm)) {
		if (PTR_ERR(cxlhdm) == -ENODEV)
			dev_err(&port->dev, "HDM decoder registers not found\n");
		return PTR_ERR(cxlhdm);
	}

	/* Cache the data early to ensure is_visible() works */
	read_cdat_data(port);

	get_device(&cxlmd->dev);
	rc = devm_add_action_or_reset(&port->dev, schedule_detach, cxlmd);
	if (rc)
		return rc;

	rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info);
	if (rc)
		return rc;

	rc = devm_cxl_enumerate_decoders(cxlhdm, &info);
	if (rc)
		return rc;

	/*
	 * This can't fail in practice as CXL root exit unregisters all
	 * descendant ports and that in turn synchronizes with cxl_port_probe()
	 */
	root = find_cxl_root(port);

	/*
	 * Now that all endpoint decoders are successfully enumerated, try to
	 * assemble regions from committed decoders
	 */
	device_for_each_child(&port->dev, root, discover_region);
	put_device(&root->dev);

	return 0;
}
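
/* Dispatch by port type: endpoint and switch ports have distinct probe flows */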
static int cxl_port_probe(struct device *dev)
{
	struct cxl_port *port = to_cxl_port(dev);

	if (is_cxl_endpoint(port))
		return cxl_endpoint_port_probe(port);
	return cxl_switch_port_probe(port);
}
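
/*
 * "CDAT" sysfs binary attribute: dumps the raw Coherent Device Attribute
 * Table that read_cdat_data() cached at probe time.
 */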
static ssize_t CDAT_read(struct file *filp, struct kobject *kobj,
			 struct bin_attribute *bin_attr, char *buf,
			 loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_port *port = to_cxl_port(dev);

	if (!port->cdat_available)
		return -ENXIO;

	if (!port->cdat.table)
		return 0;

	return memory_read_from_buffer(buf, count, &offset,
				       port->cdat.table,
				       port->cdat.length);
}
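
/* Size 0: the CDAT length is only known at runtime, so CDAT_read() bounds reads */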
static BIN_ATTR_ADMIN_RO(CDAT, 0);
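
/* Only expose the CDAT attribute when a CDAT was retrieved for this port */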
static umode_t cxl_port_bin_attr_is_visible(struct kobject *kobj,
					    struct bin_attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_port *port = to_cxl_port(dev);

	if ((attr == &bin_attr_CDAT) && port->cdat_available)
		return attr->attr.mode;

	return 0;
}

static struct bin_attribute *cxl_cdat_bin_attributes[] = {
	&bin_attr_CDAT,
	NULL,
};

static struct attribute_group cxl_cdat_attribute_group = {
	.bin_attrs = cxl_cdat_bin_attributes,
	.is_bin_visible = cxl_port_bin_attr_is_visible,
};

static const struct attribute_group *cxl_port_attribute_groups[] = {
	&cxl_cdat_attribute_group,
	NULL,
};
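
/*
 * Register with the cxl bus: matching is by the CXL_DEVICE_PORT id, and the
 * CDAT attribute group is attached to every port device while it is bound.
 */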
static struct cxl_driver cxl_port_driver = {
	.name = "cxl_port",
	.probe = cxl_port_probe,
	.id = CXL_DEVICE_PORT,
	.drv = {
		.dev_groups = cxl_port_attribute_groups,
	},
};

module_cxl_driver(cxl_port_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
MODULE_ALIAS_CXL(CXL_DEVICE_PORT);