libnvdimm, pfn: Pad pfn namespaces relative to other regions
Commit cfe30b8720 "libnvdimm, pmem: adjust for section collisions with 'System RAM'" enabled Linux to work around occasions where platform firmware arranges for "System RAM" and "Persistent Memory" to collide within a single section boundary. Unfortunately, as reported in this issue [1], platform firmware can inflict the same collision between persistent memory regions.

The approach of interrogating iomem_resource does not work in this case because platform firmware may merge multiple regions into a single iomem_resource range. Instead, provide a method to interrogate regions that share the same parent bus.

This is a stop-gap until the core-MM can grow support for hotplug on sub-section boundaries.

[1]: https://github.com/pmem/ndctl/issues/76

Fixes: cfe30b8720 ("libnvdimm, pmem: adjust for section collisions with...")
Cc: <stable@vger.kernel.org>
Reported-by: Patrick Geary <patrickg@supermicro.com>
Tested-by: Patrick Geary <patrickg@supermicro.com>
Reviewed-by: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
commit ae86cbfef3
parent e3f5df762d
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -111,6 +111,8 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
 		struct nd_mapping *nd_mapping, resource_size_t *overlap);
 resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
 resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
+int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
+		resource_size_t size);
 resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
 		struct nd_label_id *label_id);
 int alias_dpa_busy(struct device *dev, void *data);
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -649,14 +649,47 @@ static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys)
 			ALIGN_DOWN(phys, nd_pfn->align));
 }
 
+/*
+ * Check if pmem collides with 'System RAM', or other regions when
+ * section aligned. Trim it accordingly.
+ */
+static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trunc)
+{
+	struct nd_namespace_common *ndns = nd_pfn->ndns;
+	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+	struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
+	const resource_size_t start = nsio->res.start;
+	const resource_size_t end = start + resource_size(&nsio->res);
+	resource_size_t adjust, size;
+
+	*start_pad = 0;
+	*end_trunc = 0;
+
+	adjust = start - PHYS_SECTION_ALIGN_DOWN(start);
+	size = resource_size(&nsio->res) + adjust;
+	if (region_intersects(start - adjust, size, IORESOURCE_SYSTEM_RAM,
+				IORES_DESC_NONE) == REGION_MIXED
+			|| nd_region_conflict(nd_region, start - adjust, size))
+		*start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
+
+	/* Now check that end of the range does not collide. */
+	adjust = PHYS_SECTION_ALIGN_UP(end) - end;
+	size = resource_size(&nsio->res) + adjust;
+	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+				IORES_DESC_NONE) == REGION_MIXED
+			|| !IS_ALIGNED(end, nd_pfn->align)
+			|| nd_region_conflict(nd_region, start, size + adjust))
+		*end_trunc = end - phys_pmem_align_down(nd_pfn, end);
+}
+
 static int nd_pfn_init(struct nd_pfn *nd_pfn)
 {
 	u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
-	u32 start_pad = 0, end_trunc = 0;
+	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
 	resource_size_t start, size;
-	struct nd_namespace_io *nsio;
 	struct nd_region *nd_region;
+	u32 start_pad, end_trunc;
 	struct nd_pfn_sb *pfn_sb;
 	unsigned long npfns;
 	phys_addr_t offset;
@@ -688,30 +721,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 
 	memset(pfn_sb, 0, sizeof(*pfn_sb));
 
-	/*
-	 * Check if pmem collides with 'System RAM' when section aligned and
-	 * trim it accordingly
-	 */
-	nsio = to_nd_namespace_io(&ndns->dev);
-	start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
-	size = resource_size(&nsio->res);
-	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
-				IORES_DESC_NONE) == REGION_MIXED) {
-		start = nsio->res.start;
-		start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
-	}
-
-	start = nsio->res.start;
-	size = PHYS_SECTION_ALIGN_UP(start + size) - start;
-	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
-				IORES_DESC_NONE) == REGION_MIXED
-			|| !IS_ALIGNED(start + resource_size(&nsio->res),
-				nd_pfn->align)) {
-		size = resource_size(&nsio->res);
-		end_trunc = start + size - phys_pmem_align_down(nd_pfn,
-				start + size);
-	}
-
+	trim_pfn_device(nd_pfn, &start_pad, &end_trunc);
 	if (start_pad + end_trunc)
 		dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n",
 				dev_name(&ndns->dev), start_pad + end_trunc);
@@ -722,7 +732,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	 * implementation will limit the pfns advertised through
 	 * ->direct_access() to those that are included in the memmap.
 	 */
-	start += start_pad;
+	start = nsio->res.start + start_pad;
 	size = resource_size(&nsio->res);
 	npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K)
 			/ PAGE_SIZE);
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -1184,6 +1184,47 @@ int nvdimm_has_cache(struct nd_region *nd_region)
 }
 EXPORT_SYMBOL_GPL(nvdimm_has_cache);
 
+struct conflict_context {
+	struct nd_region *nd_region;
+	resource_size_t start, size;
+};
+
+static int region_conflict(struct device *dev, void *data)
+{
+	struct nd_region *nd_region;
+	struct conflict_context *ctx = data;
+	resource_size_t res_end, region_end, region_start;
+
+	if (!is_memory(dev))
+		return 0;
+
+	nd_region = to_nd_region(dev);
+	if (nd_region == ctx->nd_region)
+		return 0;
+
+	res_end = ctx->start + ctx->size;
+	region_start = nd_region->ndr_start;
+	region_end = region_start + nd_region->ndr_size;
+	if (ctx->start >= region_start && ctx->start < region_end)
+		return -EBUSY;
+	if (res_end > region_start && res_end <= region_end)
+		return -EBUSY;
+	return 0;
+}
+
+int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
+		resource_size_t size)
+{
+	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
+	struct conflict_context ctx = {
+		.nd_region = nd_region,
+		.start = start,
+		.size = size,
+	};
+
+	return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
+}
+
 void __exit nd_region_devs_exit(void)
 {
 	ida_destroy(&region_ida);
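For reference, the interval test used by region_conflict() above can be exercised on its own. The following is an assumed standalone harness, not part of the patch: the helper name ranges_conflict and the example values are made up for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Same overlap test as region_conflict(): flag a conflict when either
 * endpoint of the proposed range [start, start + size) falls inside the
 * sibling region [region_start, region_start + region_size).
 */
static bool ranges_conflict(uint64_t start, uint64_t size,
		uint64_t region_start, uint64_t region_size)
{
	uint64_t res_end = start + size;
	uint64_t region_end = region_start + region_size;

	if (start >= region_start && start < region_end)
		return true;
	if (res_end > region_start && res_end <= region_end)
		return true;
	return false;
}

int main(void)
{
	/* 1GB candidate starting inside a 2GB sibling region: prints 1 */
	printf("%d\n", ranges_conflict(3ULL << 29, 1ULL << 30, 0, 2ULL << 30));
	/* 1GB candidate immediately after the sibling region: prints 0 */
	printf("%d\n", ranges_conflict(2ULL << 30, 1ULL << 30, 0, 2ULL << 30));
	return 0;
}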