cxl/core: Split decoder setup into alloc + add

The kbuild robot reports:

    drivers/cxl/core/bus.c:516:1: warning: stack frame size (1032) exceeds
    limit (1024) in function 'devm_cxl_add_decoder'

It is also the case that devm_cxl_add_decoder() is unwieldy to use for
all the different decoder types. Fix the stack usage by splitting the
creation into alloc and add steps. This also allows for context
specific construction before adding.

With the split, the caller is responsible for registering a devm callback
to trigger device_unregister() for the decoder rather than it being
implicit in the decoder registration. I.e. the routine that calls alloc
is responsible for calling put_device() if the "add" operation fails.
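
For illustration, a minimal sketch of the resulting calling convention
(condensed from the cfmws path in the diff below; port, nr_targets,
target_map, and host stand in for caller-specific context):

    struct cxl_decoder *cxld;
    int rc;

    /* step 1: allocate, with space for nr_targets entries */
    cxld = cxl_decoder_alloc(port, nr_targets);
    if (IS_ERR(cxld))
            return PTR_ERR(cxld);

    /* step 2: context specific construction before adding */
    cxld->target_type = CXL_DECODER_EXPANDER;
    cxld->interleave_ways = nr_targets;

    /* step 3: add; on failure the caller owns the put_device()... */
    rc = cxl_decoder_add(cxld, target_map);
    if (rc)
            put_device(&cxld->dev);
    else
            /* ...otherwise arrange devm-triggered device_unregister() */
            rc = cxl_decoder_autoremove(host, cxld);
    return rc;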

Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Nathan Chancellor <nathan@kernel.org>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Ben Widawsky <ben.widawsky@intel.com>
Link: https://lore.kernel.org/r/163225205828.3038145.6831131648369404859.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Author: Dan Williams <dan.j.williams@intel.com>
Date:   2021-09-21 12:22:16 -07:00
Commit: 48667f6761 (parent: 7d3eb23c4c)

5 changed files with 114 additions and 126 deletions

drivers/cxl/acpi.c

@@ -82,7 +82,6 @@ static void cxl_add_cfmws_decoders(struct device *dev,
 	struct cxl_decoder *cxld;
 	acpi_size len, cur = 0;
 	void *cedt_subtable;
-	unsigned long flags;
 	int rc;
 
 	len = acpi_cedt->length - sizeof(*acpi_cedt);
@@ -119,24 +118,36 @@ static void cxl_add_cfmws_decoders(struct device *dev,
 		for (i = 0; i < CFMWS_INTERLEAVE_WAYS(cfmws); i++)
 			target_map[i] = cfmws->interleave_targets[i];
 
-		flags = cfmws_to_decoder_flags(cfmws->restrictions);
-		cxld = devm_cxl_add_decoder(dev, root_port,
-					    CFMWS_INTERLEAVE_WAYS(cfmws),
-					    cfmws->base_hpa, cfmws->window_size,
-					    CFMWS_INTERLEAVE_WAYS(cfmws),
-					    CFMWS_INTERLEAVE_GRANULARITY(cfmws),
-					    CXL_DECODER_EXPANDER,
-					    flags, target_map);
-
-		if (IS_ERR(cxld)) {
+		cxld = cxl_decoder_alloc(root_port,
+					 CFMWS_INTERLEAVE_WAYS(cfmws));
+		if (IS_ERR(cxld))
+			goto next;
+
+		cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
+		cxld->target_type = CXL_DECODER_EXPANDER;
+		cxld->range = (struct range) {
+			.start = cfmws->base_hpa,
+			.end = cfmws->base_hpa + cfmws->window_size - 1,
+		};
+		cxld->interleave_ways = CFMWS_INTERLEAVE_WAYS(cfmws);
+		cxld->interleave_granularity =
+			CFMWS_INTERLEAVE_GRANULARITY(cfmws);
+
+		rc = cxl_decoder_add(cxld, target_map);
+		if (rc)
+			put_device(&cxld->dev);
+		else
+			rc = cxl_decoder_autoremove(dev, cxld);
+		if (rc) {
 			dev_err(dev, "Failed to add decoder for %#llx-%#llx\n",
 				cfmws->base_hpa, cfmws->base_hpa +
 				cfmws->window_size - 1);
-		} else {
-			dev_dbg(dev, "add: %s range %#llx-%#llx\n",
-				dev_name(&cxld->dev), cfmws->base_hpa,
-				cfmws->base_hpa + cfmws->window_size - 1);
+			goto next;
 		}
+		dev_dbg(dev, "add: %s range %#llx-%#llx\n",
+			dev_name(&cxld->dev), cfmws->base_hpa,
+			cfmws->base_hpa + cfmws->window_size - 1);
+next:
 		cur += c->length;
 	}
 }
@@ -266,6 +277,7 @@ static int add_host_bridge_uport(struct device *match, void *arg)
 	struct acpi_device *bridge = to_cxl_host_bridge(host, match);
 	struct acpi_pci_root *pci_root;
 	struct cxl_walk_context ctx;
+	int single_port_map[1], rc;
 	struct cxl_decoder *cxld;
 	struct cxl_dport *dport;
 	struct cxl_port *port;
@@ -301,22 +313,46 @@ static int add_host_bridge_uport(struct device *match, void *arg)
 		return -ENODEV;
 	if (ctx.error)
 		return ctx.error;
+	if (ctx.count > 1)
+		return 0;
 
 	/* TODO: Scan CHBCR for HDM Decoder resources */
 
 	/*
-	 * In the single-port host-bridge case there are no HDM decoders
-	 * in the CHBCR and a 1:1 passthrough decode is implied.
+	 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability
+	 * Structure) single ported host-bridges need not publish a decoder
+	 * capability when a passthrough decode can be assumed, i.e. all
+	 * transactions that the uport sees are claimed and passed to the single
+	 * dport. Disable the range until the first CXL region is enumerated /
+	 * activated.
 	 */
-	if (ctx.count == 1) {
-		cxld = devm_cxl_add_passthrough_decoder(host, port);
-		if (IS_ERR(cxld))
-			return PTR_ERR(cxld);
+	cxld = cxl_decoder_alloc(port, 1);
+	if (IS_ERR(cxld))
+		return PTR_ERR(cxld);
 
+	cxld->interleave_ways = 1;
+	cxld->interleave_granularity = PAGE_SIZE;
+	cxld->target_type = CXL_DECODER_EXPANDER;
+	cxld->range = (struct range) {
+		.start = 0,
+		.end = -1,
+	};
+
+	device_lock(&port->dev);
+	dport = list_first_entry(&port->dports, typeof(*dport), list);
+	device_unlock(&port->dev);
+
+	single_port_map[0] = dport->port_id;
+
+	rc = cxl_decoder_add(cxld, single_port_map);
+	if (rc)
+		put_device(&cxld->dev);
+	else
+		rc = cxl_decoder_autoremove(host, cxld);
+
+	if (rc == 0)
 		dev_dbg(host, "add: %s\n", dev_name(&cxld->dev));
-	}
-
-	return 0;
+	return rc;
 }
 
 static int add_host_bridge_dport(struct device *match, void *arg)

drivers/cxl/core/bus.c

@@ -453,10 +453,8 @@ err:
 }
 EXPORT_SYMBOL_GPL(cxl_add_dport);
 
-static int decoder_populate_targets(struct device *host,
-				    struct cxl_decoder *cxld,
-				    struct cxl_port *port, int *target_map,
-				    int nr_targets)
+static int decoder_populate_targets(struct cxl_decoder *cxld,
+				    struct cxl_port *port, int *target_map)
 {
 	int rc = 0, i;
 
@@ -464,66 +462,48 @@ static int decoder_populate_targets(struct device *host,
 		return 0;
 
 	device_lock(&port->dev);
-	for (i = 0; i < nr_targets; i++) {
+	if (list_empty(&port->dports)) {
+		rc = -EINVAL;
+		goto out_unlock;
+	}
+
+	for (i = 0; i < cxld->nr_targets; i++) {
 		struct cxl_dport *dport = find_dport(port, target_map[i]);
 
 		if (!dport) {
 			rc = -ENXIO;
-			break;
+			goto out_unlock;
 		}
-		dev_dbg(host, "%s: target: %d\n", dev_name(dport->dport), i);
 		cxld->target[i] = dport;
 	}
+
+out_unlock:
 	device_unlock(&port->dev);
 
 	return rc;
 }
 
-static struct cxl_decoder *
-cxl_decoder_alloc(struct device *host, struct cxl_port *port, int nr_targets,
-		  resource_size_t base, resource_size_t len,
-		  int interleave_ways, int interleave_granularity,
-		  enum cxl_decoder_type type, unsigned long flags,
-		  int *target_map)
+struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, int nr_targets)
 {
-	struct cxl_decoder *cxld;
+	struct cxl_decoder *cxld, cxld_const_init = {
+		.nr_targets = nr_targets,
+	};
 	struct device *dev;
 	int rc = 0;
 
-	if (interleave_ways < 1)
+	if (nr_targets > CXL_DECODER_MAX_INTERLEAVE || nr_targets < 1)
 		return ERR_PTR(-EINVAL);
 
-	device_lock(&port->dev);
-	if (list_empty(&port->dports))
-		rc = -EINVAL;
-	device_unlock(&port->dev);
-	if (rc)
-		return ERR_PTR(rc);
-
 	cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
 	if (!cxld)
 		return ERR_PTR(-ENOMEM);
+	memcpy(cxld, &cxld_const_init, sizeof(cxld_const_init));
 
 	rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
 	if (rc < 0)
 		goto err;
+	cxld->id = rc;
 
-	*cxld = (struct cxl_decoder) {
-		.id = rc,
-		.range = {
-			.start = base,
-			.end = base + len - 1,
-		},
-		.flags = flags,
-		.interleave_ways = interleave_ways,
-		.interleave_granularity = interleave_granularity,
-		.target_type = type,
-	};
-
-	rc = decoder_populate_targets(host, cxld, port, target_map, nr_targets);
-	if (rc)
-		goto err;
-
 	dev = &cxld->dev;
 	device_initialize(dev);
 	device_set_pm_not_required(dev);
@@ -541,72 +521,47 @@ err:
 	kfree(cxld);
 	return ERR_PTR(rc);
 }
+EXPORT_SYMBOL_GPL(cxl_decoder_alloc);
 
-struct cxl_decoder *
-devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
-		     resource_size_t base, resource_size_t len,
-		     int interleave_ways, int interleave_granularity,
-		     enum cxl_decoder_type type, unsigned long flags,
-		     int *target_map)
+int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
 {
-	struct cxl_decoder *cxld;
+	struct cxl_port *port;
 	struct device *dev;
 	int rc;
 
-	if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
-		return ERR_PTR(-EINVAL);
+	if (WARN_ON_ONCE(!cxld))
+		return -EINVAL;
 
-	cxld = cxl_decoder_alloc(host, port, nr_targets, base, len,
-				 interleave_ways, interleave_granularity, type,
-				 flags, target_map);
+	if (WARN_ON_ONCE(IS_ERR(cxld)))
+		return PTR_ERR(cxld);
 
-	if (IS_ERR(cxld))
-		return cxld;
+	if (cxld->interleave_ways < 1)
+		return -EINVAL;
+
+	port = to_cxl_port(cxld->dev.parent);
+	rc = decoder_populate_targets(cxld, port, target_map);
+	if (rc)
+		return rc;
 
 	dev = &cxld->dev;
 	rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
 	if (rc)
-		goto err;
+		return rc;
 
-	rc = device_add(dev);
-	if (rc)
-		goto err;
-
-	rc = devm_add_action_or_reset(host, unregister_cxl_dev, dev);
-	if (rc)
-		return ERR_PTR(rc);
-	return cxld;
-
-err:
-	put_device(dev);
-	return ERR_PTR(rc);
+	return device_add(dev);
 }
-EXPORT_SYMBOL_GPL(devm_cxl_add_decoder);
+EXPORT_SYMBOL_GPL(cxl_decoder_add);
 
-/*
- * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
- * single ported host-bridges need not publish a decoder capability when a
- * passthrough decode can be assumed, i.e. all transactions that the uport sees
- * are claimed and passed to the single dport. Default the range a 0-base
- * 0-length until the first CXL region is activated.
- */
-struct cxl_decoder *devm_cxl_add_passthrough_decoder(struct device *host,
-						     struct cxl_port *port)
+static void cxld_unregister(void *dev)
 {
-	struct cxl_dport *dport;
-	int target_map[1];
-
-	device_lock(&port->dev);
-	dport = list_first_entry_or_null(&port->dports, typeof(*dport), list);
-	device_unlock(&port->dev);
+	device_unregister(dev);
+}
 
-	if (!dport)
-		return ERR_PTR(-ENXIO);
+int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
+{
+	return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
+}
+EXPORT_SYMBOL_GPL(cxl_decoder_autoremove);
 
-	target_map[0] = dport->port_id;
-	return devm_cxl_add_decoder(host, port, 1, 0, 0, 1, PAGE_SIZE,
-				    CXL_DECODER_EXPANDER, 0, target_map);
-}
-EXPORT_SYMBOL_GPL(devm_cxl_add_passthrough_decoder);
 
 /**
  * __cxl_driver_register - register a driver for the cxl bus

drivers/cxl/core/core.h

@@ -9,11 +9,6 @@ extern const struct device_type cxl_nvdimm_type;
 
 extern struct attribute_group cxl_base_attribute_group;
 
-static inline void unregister_cxl_dev(void *dev)
-{
-	device_unregister(dev);
-}
-
 struct cxl_send_command;
 struct cxl_mem_query_commands;
 int cxl_query_cmd(struct cxl_memdev *cxlmd,

drivers/cxl/core/pmem.c

@@ -222,6 +222,11 @@ static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
 	return cxl_nvd;
 }
 
+static void cxl_nvd_unregister(void *dev)
+{
+	device_unregister(dev);
+}
+
 /**
  * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm
  * @host: same host as @cxlmd
@@ -251,7 +256,7 @@ int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
 	dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
 		dev_name(dev));
 
-	return devm_add_action_or_reset(host, unregister_cxl_dev, dev);
+	return devm_add_action_or_reset(host, cxl_nvd_unregister, dev);
 
 err:
 	put_device(dev);

drivers/cxl/cxl.h

@@ -195,6 +195,7 @@ enum cxl_decoder_type {
  * @interleave_granularity: data stride per dport
  * @target_type: accelerator vs expander (type2 vs type3) selector
  * @flags: memory type capabilities and locking
+ * @nr_targets: number of elements in @target
  * @target: active ordered target list in current decoder configuration
  */
 struct cxl_decoder {
@@ -205,6 +206,7 @@ struct cxl_decoder {
 	int interleave_granularity;
 	enum cxl_decoder_type target_type;
 	unsigned long flags;
+	const int nr_targets;
 	struct cxl_dport *target[];
 };
 
@@ -286,15 +288,10 @@ int cxl_add_dport(struct cxl_port *port, struct device *dport, int port_id,
 
 struct cxl_decoder *to_cxl_decoder(struct device *dev);
 bool is_root_decoder(struct device *dev);
-struct cxl_decoder *
-devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
-		     resource_size_t base, resource_size_t len,
-		     int interleave_ways, int interleave_granularity,
-		     enum cxl_decoder_type type, unsigned long flags,
-		     int *target_map);
-struct cxl_decoder *devm_cxl_add_passthrough_decoder(struct device *host,
-						     struct cxl_port *port);
+struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, int nr_targets);
+int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map);
+int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld);
 
 extern struct bus_type cxl_bus_type;
 
 struct cxl_driver {