Merge branch 'for-5.1/libnvdimm-start-pad' into libnvdimm-for-next
Merge the initial lead-in cleanups and fixes that resulted from the effort to resolve bugs in the nvdimm core's section-alignment padding implementation. The back half of that effort has been abandoned in favor of implementing sub-section hotplug support.
commit 6fd96ff557
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
@@ -86,12 +86,14 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 {
 	struct dax_device *dax_dev;
 	bool dax_enabled = false;
+	pgoff_t pgoff, pgoff_end;
 	struct request_queue *q;
-	pgoff_t pgoff;
-	int err, id;
-	pfn_t pfn;
-	long len;
 	char buf[BDEVNAME_SIZE];
+	void *kaddr, *end_kaddr;
+	pfn_t pfn, end_pfn;
+	sector_t last_page;
+	long len, len2;
+	int err, id;
 
 	if (blocksize != PAGE_SIZE) {
 		pr_debug("%s: error: unsupported blocksize for dax\n",
@@ -113,6 +115,14 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 		return false;
 	}
 
+	last_page = PFN_DOWN(i_size_read(bdev->bd_inode) - 1) * 8;
+	err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
+	if (err) {
+		pr_debug("%s: error: unaligned partition for dax\n",
+				bdevname(bdev, buf));
+		return false;
+	}
+
 	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
 	if (!dax_dev) {
 		pr_debug("%s: error: device does not support dax\n",
@@ -121,14 +131,15 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 	}
 
 	id = dax_read_lock();
-	len = dax_direct_access(dax_dev, pgoff, 1, NULL, &pfn);
+	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
+	len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
 	dax_read_unlock(id);
 
 	put_dax(dax_dev);
 
-	if (len < 1) {
+	if (len < 1 || len2 < 1) {
 		pr_debug("%s: error: dax access failed (%ld)\n",
-				bdevname(bdev, buf), len);
+				bdevname(bdev, buf), len < 1 ? len : len2);
 		return false;
 	}
 
@@ -143,13 +154,20 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 		 */
 		WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
 		dax_enabled = true;
-	} else if (pfn_t_devmap(pfn)) {
-		struct dev_pagemap *pgmap;
+	} else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
+		struct dev_pagemap *pgmap, *end_pgmap;
 
 		pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
-		if (pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX)
+		end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
+		if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
+				&& pfn_t_to_page(pfn)->pgmap == pgmap
+				&& pfn_t_to_page(end_pfn)->pgmap == pgmap
+				&& pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
+				&& pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
 			dax_enabled = true;
 		put_dev_pagemap(pgmap);
+		put_dev_pagemap(end_pgmap);
+
 	}
 
 	if (!dax_enabled) {
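Taken together, the super.c hunks make __bdev_dax_supported() validate dax operation at both ends of the block device rather than only at offset zero, which catches configurations whose tail lands outside the pmem device's page map. The last_page calculation converts the index of the device's final page into its first 512-byte sector. Below is a minimal userspace sketch of just that arithmetic, assuming 4K pages (the literal 8 is PAGE_SIZE / 512); every name in the sketch is local to it, not kernel API.

	#include <stdio.h>

	#define SECTOR_SHIFT	9	/* 512-byte sectors */
	#define PAGE_SHIFT	12	/* assumption: 4K pages */

	int main(void)
	{
		unsigned long long bdev_size = 16ULL << 20;	/* example: 16M device */

		/* page index of the last byte, as in PFN_DOWN(i_size_read(...) - 1) */
		unsigned long long last_pfn = (bdev_size - 1) >> PAGE_SHIFT;

		/* first 512-byte sector of that page, as in "last_page = ... * 8" */
		unsigned long long last_page_sector = last_pfn * 8;

		printf("last page index: %llu, starts at sector %llu\n",
				last_pfn, last_page_sector);
		return 0;
	}

That sector is then handed to bdev_dax_pgoff() to obtain pgoff_end, so dax_direct_access() can be probed at both extremes of the device.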
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
@@ -138,6 +138,7 @@ bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
 bool pmem_should_map_pages(struct device *dev)
 {
 	struct nd_region *nd_region = to_nd_region(dev->parent);
+	struct nd_namespace_common *ndns = to_ndns(dev);
 	struct nd_namespace_io *nsio;
 
 	if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
@@ -149,6 +150,9 @@ bool pmem_should_map_pages(struct device *dev)
 	if (is_nd_pfn(dev) || is_nd_btt(dev))
 		return false;
 
+	if (ndns->force_raw)
+		return false;
+
 	nsio = to_nd_namespace_io(dev);
 	if (region_intersects(nsio->res.start, resource_size(&nsio->res),
 				IORESOURCE_SYSTEM_RAM,
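The namespace_devs.c change makes pmem_should_map_pages() honor a namespace's force_raw setting, so a user-requested raw mode is no longer silently ignored for legacy pmem. A compact, compilable sketch of the resulting decision order follows; the struct and field names are stand-ins invented for illustration, not kernel API, and the final RAM-overlap test is simplified.

	#include <stdbool.h>
	#include <stdio.h>

	struct ns {			/* stand-in for the namespace state */
		bool is_pfn_or_btt;	/* claimed by a pfn or btt personality */
		bool force_raw;		/* raw mode forced via sysfs */
		bool overlaps_ram;	/* namespace intersects System RAM */
	};

	/* mirrors the post-fix decision order in pmem_should_map_pages() */
	static bool should_map_pages(const struct ns *ns)
	{
		if (ns->is_pfn_or_btt)
			return false;	/* those personalities handle mapping */
		if (ns->force_raw)
			return false;	/* the new check: honor force_raw */
		return !ns->overlaps_ram;
	}

	int main(void)
	{
		struct ns legacy = { .force_raw = true };

		printf("map pages? %s\n", should_map_pages(&legacy) ? "yes" : "no");
		return 0;
	}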
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
@@ -580,6 +580,11 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
 }
 EXPORT_SYMBOL(nd_pfn_probe);
 
+static u32 info_block_reserve(void)
+{
+	return ALIGN(SZ_8K, PAGE_SIZE);
+}
+
 /*
  * We hotplug memory at section granularity, pad the reserved area from
  * the previous section base to the namespace base address.
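The new info_block_reserve() helper exists because the 8K reserved for the namespace info block is only page-aligned when PAGE_SIZE <= 8K; on 64K-page configurations (arm64, ppc64) the reserve must round up to a full page. A small demonstration of that rounding, using a locally defined ALIGN with the same power-of-two behavior as the kernel macro:

	#include <stdio.h>

	#define SZ_8K		(8UL * 1024)
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned long page_sizes[] = { 4096, 16384, 65536 };

		for (int i = 0; i < 3; i++)
			printf("PAGE_SIZE %6lu -> info block reserve %6lu\n",
					page_sizes[i], ALIGN(SZ_8K, page_sizes[i]));
		return 0;
	}

On 4K pages the reserve stays 8K; on 16K and 64K pages it grows to one full page, which is exactly the discrepancy the SZ_8K-to-reserve conversions below correct.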
@@ -593,7 +598,7 @@ static unsigned long init_altmap_base(resource_size_t base)
 
 static unsigned long init_altmap_reserve(resource_size_t base)
 {
-	unsigned long reserve = PHYS_PFN(SZ_8K);
+	unsigned long reserve = info_block_reserve() >> PAGE_SHIFT;
 	unsigned long base_pfn = PHYS_PFN(base);
 
 	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
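A worked example of the init_altmap_reserve() arithmetic: the reserved pfn count is the info-block pages plus however many pages separate the namespace base from the start of its memory section. The namespace base below is made up, and 4K pages with 128M sections (x86_64 defaults) are assumed; PFN_SECTION_ALIGN_DOWN is defined locally to match that assumption.

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define SECTION_SHIFT	27	/* 128M memory sections */
	#define PFN_SECTION_ALIGN_DOWN(pfn) \
		((pfn) & ~((1UL << (SECTION_SHIFT - PAGE_SHIFT)) - 1))

	int main(void)
	{
		unsigned long long base = 0x148200000ULL; /* example namespace base */
		unsigned long info_reserve = 8192 >> PAGE_SHIFT; /* 2 pages on 4K */
		unsigned long base_pfn = base >> PAGE_SHIFT;
		unsigned long reserve = info_reserve +
				(base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn));

		printf("base_pfn %#lx, section base %#lx, reserved pfns %lu\n",
				base_pfn, PFN_SECTION_ALIGN_DOWN(base_pfn), reserve);
		return 0;
	}

For this base the namespace starts 512 pages into its section, so 514 pfns (512 pad + 2 info-block pages) end up reserved.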
@@ -608,6 +613,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
 	u64 offset = le64_to_cpu(pfn_sb->dataoff);
 	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
 	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+	u32 reserve = info_block_reserve();
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
 	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
 	resource_size_t base = nsio->res.start + start_pad;
@@ -621,7 +627,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
 	res->end -= end_trunc;
 
 	if (nd_pfn->mode == PFN_MODE_RAM) {
-		if (offset < SZ_8K)
+		if (offset < reserve)
 			return -EINVAL;
 		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
 		pgmap->altmap_valid = false;
@@ -634,7 +640,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
 				le64_to_cpu(nd_pfn->pfn_sb->npfns),
 				nd_pfn->npfns);
 		memcpy(altmap, &__altmap, sizeof(*altmap));
-		altmap->free = PHYS_PFN(offset - SZ_8K);
+		altmap->free = PHYS_PFN(offset - reserve);
 		altmap->alloc = 0;
 		pgmap->altmap_valid = true;
 	} else
@@ -678,18 +684,17 @@ static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trunc)
 	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
 				IORES_DESC_NONE) == REGION_MIXED
 			|| !IS_ALIGNED(end, nd_pfn->align)
-			|| nd_region_conflict(nd_region, start, size + adjust))
+			|| nd_region_conflict(nd_region, start, size))
 		*end_trunc = end - phys_pmem_align_down(nd_pfn, end);
 }
 
 static int nd_pfn_init(struct nd_pfn *nd_pfn)
 {
-	u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
 	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+	u32 start_pad, end_trunc, reserve = info_block_reserve();
 	resource_size_t start, size;
 	struct nd_region *nd_region;
-	u32 start_pad, end_trunc;
 	struct nd_pfn_sb *pfn_sb;
 	unsigned long npfns;
 	phys_addr_t offset;
@@ -734,7 +739,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	 */
 	start = nsio->res.start + start_pad;
 	size = resource_size(&nsio->res);
-	npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K)
+	npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - reserve)
 			/ PAGE_SIZE);
 	if (nd_pfn->mode == PFN_MODE_PMEM) {
 		/*
@@ -742,11 +747,10 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 		 * when populating the vmemmap. This *should* be equal to
 		 * PMD_SIZE for most architectures.
 		 */
-		offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve,
+		offset = ALIGN(start + reserve + 64 * npfns,
 				max(nd_pfn->align, PMD_SIZE)) - start;
 	} else if (nd_pfn->mode == PFN_MODE_RAM)
-		offset = ALIGN(start + SZ_8K + dax_label_reserve,
-				nd_pfn->align) - start;
+		offset = ALIGN(start + reserve, nd_pfn->align) - start;
 	else
 		return -ENXIO;
 
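As a sanity check on the nd_pfn_init() sizing above, here is the PFN_MODE_PMEM data offset computed for an illustrative namespace: the offset is the info-block reserve plus 64 bytes of page metadata per pfn, rounded up to max(nd_pfn->align, PMD_SIZE). This simplified sketch drops the PFN_SECTION_ALIGN_UP step and uses made-up sizes (4K pages, 2M PMD, 128G namespace).

	#include <stdio.h>

	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long long)(a) - 1))

	int main(void)
	{
		unsigned long long start = 0;		/* namespace base */
		unsigned long long size = 128ULL << 30;	/* 128G namespace */
		unsigned long long reserve = 8192;	/* info_block_reserve() on 4K */
		unsigned long long npfns = size / 4096;	/* pages to describe */
		unsigned long long align = 2ULL << 20;	/* max(nd_pfn->align, PMD_SIZE) */

		unsigned long long offset =
				ALIGN(start + reserve + 64 * npfns, align) - start;

		printf("metadata: %llu bytes (%llu M), data starts at offset %#llx\n",
				64 * npfns, (64 * npfns) >> 20, offset);
		return 0;
	}

For a 128G namespace the struct page array alone is 2G, so the data offset lands at 2G + 2M once rounded, which is why getting the reserve term right matters for on-media layout compatibility.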