cxl/pci: Reserve individual register block regions
Some hardware implementations mix component and device registers into the same BAR, and the driver stack is going to need independent mapping implementations for those two cases. Furthermore, it would be nice to have finer-grained mappings should user space want to map some register blocks.

Now that individual register blocks are mapped, the regions backing those blocks should be reserved individually to fully separate the register blocks.

Release the 'global' memory reservation and create individual register block region reservations through devm.

NOTE: pci_release_mem_regions() is still compatible with pcim_enable_device() because it removes the automatic region release when called. So preserve the pcim_enable_device() call so that the pcim interface can still be used if needed.

Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Link: https://lore.kernel.org/r/20210604005316.4187340-1-ira.weiny@intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
commit 9a016527dc
parent 30af97296f
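As a reading aid for the NOTE above, here is a minimal sketch of the probe-time ordering the change relies on: pcim_enable_device() stays, the BAR-wide reservation taken earlier in probe is dropped with pci_release_mem_regions(), and each register block is then reserved and mapped on its own through devm. This is not part of the patch; example_probe(), the "example" region name, the BAR/offset/size values, and the trimmed error handling are all illustrative.

#include <linux/device.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/sizes.h>

/* Illustrative only -- not the driver's actual probe path. */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	resource_size_t addr, length;
	void __iomem *block;
	int rc;

	/* Managed enable; device state is released automatically on detach. */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* Coarse, BAR-wide reservation while the register blocks are located. */
	rc = pci_request_mem_regions(pdev, "example");
	if (rc)
		return rc;

	/* ... locate the register block offsets and sizes within the BARs ... */

	/*
	 * Drop the 'global' reservation.  Per the commit message, this also
	 * removes the automatic region release, so it remains compatible with
	 * pcim_enable_device().
	 */
	pci_release_mem_regions(pdev);

	/* Reserve and map one block via devm; repeat per register block. */
	addr = pci_resource_start(pdev, 0);	/* hypothetical: BAR 0, offset 0 */
	length = SZ_64K;			/* hypothetical block size */
	if (!devm_request_mem_region(&pdev->dev, addr, length, pci_name(pdev)))
		return -EBUSY;
	block = devm_ioremap(&pdev->dev, addr, length);
	if (!block)
		return -ENOMEM;

	return 0;
}

The per-block reservation in this sketch is the same devm_request_mem_region() + devm_ioremap() pairing that the patch's new devm_cxl_iomap_block() helper wraps.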
@@ -79,11 +79,33 @@ void cxl_probe_device_regs(struct device *dev, void __iomem *base,
 }
 EXPORT_SYMBOL_GPL(cxl_probe_device_regs);
 
+static void __iomem *devm_cxl_iomap_block(struct pci_dev *pdev,
+					  resource_size_t addr,
+					  resource_size_t length)
+{
+	struct device *dev = &pdev->dev;
+	void __iomem *ret_val;
+	struct resource *res;
+
+	res = devm_request_mem_region(dev, addr, length, pci_name(pdev));
+	if (!res) {
+		resource_size_t end = addr + length - 1;
+
+		dev_err(dev, "Failed to request region %pa-%pa\n", &addr, &end);
+		return NULL;
+	}
+
+	ret_val = devm_ioremap(dev, addr, length);
+	if (!ret_val)
+		dev_err(dev, "Failed to map region %pr\n", res);
+
+	return ret_val;
+}
+
 int cxl_map_device_regs(struct pci_dev *pdev,
 			struct cxl_device_regs *regs,
 			struct cxl_register_map *map)
 {
-	struct device *dev = &pdev->dev;
 	resource_size_t phys_addr;
 
 	phys_addr = pci_resource_start(pdev, map->barno);
@@ -95,7 +117,9 @@ int cxl_map_device_regs(struct pci_dev *pdev,
 
 		addr = phys_addr + map->device_map.status.offset;
 		length = map->device_map.status.size;
-		regs->status = devm_ioremap(dev, addr, length);
+		regs->status = devm_cxl_iomap_block(pdev, addr, length);
+		if (!regs->status)
+			return -ENOMEM;
 	}
 
 	if (map->device_map.mbox.valid) {
@@ -104,7 +128,9 @@ int cxl_map_device_regs(struct pci_dev *pdev,
 
 		addr = phys_addr + map->device_map.mbox.offset;
 		length = map->device_map.mbox.size;
-		regs->mbox = devm_ioremap(dev, addr, length);
+		regs->mbox = devm_cxl_iomap_block(pdev, addr, length);
+		if (!regs->mbox)
+			return -ENOMEM;
 	}
 
 	if (map->device_map.memdev.valid) {
@@ -113,7 +139,9 @@ int cxl_map_device_regs(struct pci_dev *pdev,
 
 		addr = phys_addr + map->device_map.memdev.offset;
 		length = map->device_map.memdev.size;
-		regs->memdev = devm_ioremap(dev, addr, length);
+		regs->memdev = devm_cxl_iomap_block(pdev, addr, length);
+		if (!regs->memdev)
+			return -ENOMEM;
 	}
 
 	return 0;
@@ -1110,6 +1110,8 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
 			goto free_maps;
 	}
 
+	pci_release_mem_regions(pdev);
+
 	list_for_each_entry(map, &register_maps, list) {
 		ret = cxl_map_regs(cxlm, map);
 		if (ret)