mm/devm_memremap_pages: fix final page put race
Logan noticed that devm_memremap_pages_release() kills the percpu_ref,
drops all the page references that were acquired at init, and then
immediately proceeds to unplug, via arch_remove_memory(), the backing
pages for the pagemap. If for some reason device shutdown actually
collides with a busy / elevated-ref-count page, then arch_remove_memory()
should be deferred until after that reference is dropped.
As it stands the "wait for last page ref drop" happens *after*
devm_memremap_pages_release() returns, which is obviously too late and
can lead to crashes.
Fix this situation by assigning the responsibility to wait for the
percpu_ref to go idle to devm_memremap_pages() with a new ->cleanup()
callback. Implement the new cleanup callback for all
devm_memremap_pages() users: pmem, devdax, hmm, and p2pdma.
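For illustration, here is a minimal sketch of what the new contract asks
of a driver, modeled on the devdax hooks in the diff below; the foo_*
names and struct layout are hypothetical, not part of the patch:

/*
 * Illustrative sketch only: a devm_memremap_pages() user satisfying
 * the new ->kill()/->cleanup() contract. All foo_* names are made up.
 */
#include <linux/completion.h>
#include <linux/memremap.h>
#include <linux/percpu-refcount.h>

struct foo_dev {
	struct percpu_ref ref;
	struct completion cmp;
	struct dev_pagemap pgmap;
};

/* registered via percpu_ref_init(); runs on the final reference drop */
static void foo_percpu_release(struct percpu_ref *ref)
{
	struct foo_dev *foo = container_of(ref, struct foo_dev, ref);

	complete(&foo->cmp);
}

/* ->kill(): refuse new references; busy pages still pin @ref */
static void foo_percpu_kill(struct percpu_ref *ref)
{
	percpu_ref_kill(ref);
}

/* ->cleanup(): block until the last page put, then reap @ref */
static void foo_percpu_exit(struct percpu_ref *ref)
{
	struct foo_dev *foo = container_of(ref, struct foo_dev, ref);

	wait_for_completion(&foo->cmp);
	percpu_ref_exit(ref);
}

The probe path then populates all three fields before mapping:

	init_completion(&foo->cmp);
	rc = percpu_ref_init(&foo->ref, foo_percpu_release, 0, GFP_KERNEL);
	if (rc)
		return rc;
	foo->pgmap.ref = &foo->ref;
	foo->pgmap.kill = foo_percpu_kill;
	foo->pgmap.cleanup = foo_percpu_exit;
	addr = devm_memremap_pages(dev, &foo->pgmap);

With ->cleanup() in place, devm_memremap_pages_release() can wait for the
final put itself, which is why the per-user devm_add_action_or_reset()
teardown shims are deleted in the hunks below.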
Link: http://lkml.kernel.org/r/155727339156.292046.5432007428235387859.stgit@dwillia2-desk3.amr.corp.intel.com
Fixes: 41e94a8513 ("add devm_memremap_pages")
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reported-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "Rafael J. Wysocki" <rafael@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 50f44ee724
parent 1570175abd
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -27,9 +27,8 @@ static void dev_dax_percpu_release(struct percpu_ref *ref)
 	complete(&dev_dax->cmp);
 }
 
-static void dev_dax_percpu_exit(void *data)
+static void dev_dax_percpu_exit(struct percpu_ref *ref)
 {
-	struct percpu_ref *ref = data;
 	struct dev_dax *dev_dax = ref_to_dev_dax(ref);
 
 	dev_dbg(&dev_dax->dev, "%s\n", __func__);
@@ -466,18 +465,12 @@ int dev_dax_probe(struct device *dev)
 	if (rc)
 		return rc;
 
-	rc = devm_add_action_or_reset(dev, dev_dax_percpu_exit, &dev_dax->ref);
-	if (rc)
-		return rc;
-
 	dev_dax->pgmap.ref = &dev_dax->ref;
 	dev_dax->pgmap.kill = dev_dax_percpu_kill;
+	dev_dax->pgmap.cleanup = dev_dax_percpu_exit;
 	addr = devm_memremap_pages(dev, &dev_dax->pgmap);
-	if (IS_ERR(addr)) {
-		devm_remove_action(dev, dev_dax_percpu_exit, &dev_dax->ref);
-		percpu_ref_exit(&dev_dax->ref);
+	if (IS_ERR(addr))
 		return PTR_ERR(addr);
-	}
 
 	inode = dax_inode(dax_dev);
 	cdev = inode->i_cdev;
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -303,11 +303,19 @@ static const struct attribute_group *pmem_attribute_groups[] = {
 	NULL,
 };
 
-static void pmem_release_queue(void *q)
+static void __pmem_release_queue(struct percpu_ref *ref)
 {
+	struct request_queue *q;
+
+	q = container_of(ref, typeof(*q), q_usage_counter);
 	blk_cleanup_queue(q);
 }
 
+static void pmem_release_queue(void *ref)
+{
+	__pmem_release_queue(ref);
+}
+
 static void pmem_freeze_queue(struct percpu_ref *ref)
 {
 	struct request_queue *q;
@@ -399,12 +407,10 @@ static int pmem_attach_disk(struct device *dev,
 	if (!q)
 		return -ENOMEM;
 
-	if (devm_add_action_or_reset(dev, pmem_release_queue, q))
-		return -ENOMEM;
-
 	pmem->pfn_flags = PFN_DEV;
 	pmem->pgmap.ref = &q->q_usage_counter;
 	pmem->pgmap.kill = pmem_freeze_queue;
+	pmem->pgmap.cleanup = __pmem_release_queue;
 	if (is_nd_pfn(dev)) {
 		if (setup_pagemap_fsdax(dev, &pmem->pgmap))
 			return -ENOMEM;
@@ -425,6 +431,9 @@ static int pmem_attach_disk(struct device *dev,
 		pmem->pfn_flags |= PFN_MAP;
 		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
 	} else {
+		if (devm_add_action_or_reset(dev, pmem_release_queue,
+					&q->q_usage_counter))
+			return -ENOMEM;
 		addr = devm_memremap(dev, pmem->phys_addr,
 				pmem->size, ARCH_MEMREMAP_PMEM);
 		memcpy(&bb_res, &nsio->res, sizeof(bb_res));
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -95,7 +95,7 @@ static void pci_p2pdma_percpu_kill(struct percpu_ref *ref)
 	percpu_ref_kill(ref);
 }
 
-static void pci_p2pdma_percpu_cleanup(void *ref)
+static void pci_p2pdma_percpu_cleanup(struct percpu_ref *ref)
 {
 	struct p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(ref);
 
@@ -198,16 +198,6 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
 	if (error)
 		goto pgmap_free;
 
-	/*
-	 * FIXME: the percpu_ref_exit needs to be coordinated internal
-	 * to devm_memremap_pages_release(). Duplicate the same ordering
-	 * as other devm_memremap_pages() users for now.
-	 */
-	error = devm_add_action(&pdev->dev, pci_p2pdma_percpu_cleanup,
-			&p2p_pgmap->ref);
-	if (error)
-		goto ref_cleanup;
-
 	pgmap = &p2p_pgmap->pgmap;
 
 	pgmap->res.start = pci_resource_start(pdev, bar) + offset;
@@ -218,11 +208,12 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
 	pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) -
 		pci_resource_start(pdev, bar);
 	pgmap->kill = pci_p2pdma_percpu_kill;
+	pgmap->cleanup = pci_p2pdma_percpu_cleanup;
 
 	addr = devm_memremap_pages(&pdev->dev, pgmap);
 	if (IS_ERR(addr)) {
 		error = PTR_ERR(addr);
-		goto ref_exit;
+		goto pgmap_free;
 	}
 
 	error = gen_pool_add_owner(pdev->p2pdma->pool, (unsigned long)addr,
@@ -239,8 +230,6 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
 
 pages_free:
 	devm_memunmap_pages(&pdev->dev, pgmap);
-ref_cleanup:
-	percpu_ref_exit(&p2p_pgmap->ref);
 pgmap_free:
 	devm_kfree(&pdev->dev, p2p_pgmap);
 	return error;
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -81,6 +81,7 @@ typedef void (*dev_page_free_t)(struct page *page, void *data);
  * @res: physical address range covered by @ref
  * @ref: reference count that pins the devm_memremap_pages() mapping
  * @kill: callback to transition @ref to the dead state
+ * @cleanup: callback to wait for @ref to be idle and reap it
  * @dev: host device of the mapping for debug
  * @data: private data pointer for page_free()
  * @type: memory type: see MEMORY_* in memory_hotplug.h
@@ -92,6 +93,7 @@ struct dev_pagemap {
 	struct resource res;
 	struct percpu_ref *ref;
 	void (*kill)(struct percpu_ref *ref);
+	void (*cleanup)(struct percpu_ref *ref);
 	struct device *dev;
 	void *data;
 	enum memory_type type;
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -95,6 +95,7 @@ static void devm_memremap_pages_release(void *data)
 	pgmap->kill(pgmap->ref);
 	for_each_device_pfn(pfn, pgmap)
 		put_page(pfn_to_page(pfn));
+	pgmap->cleanup(pgmap->ref);
 
 	/* pages are dead and unused, undo the arch mapping */
 	align_start = res->start & ~(SECTION_SIZE - 1);
@@ -133,8 +134,8 @@ static void devm_memremap_pages_release(void *data)
  * 2/ The altmap field may optionally be initialized, in which case altmap_valid
  *    must be set to true
  *
- * 3/ pgmap->ref must be 'live' on entry and will be killed at
- *    devm_memremap_pages_release() time, or if this routine fails.
+ * 3/ pgmap->ref must be 'live' on entry and will be killed and reaped
+ *    at devm_memremap_pages_release() time, or if this routine fails.
  *
  * 4/ res is expected to be a host memory range that could feasibly be
  *    treated as a "System RAM" range, i.e. not a device mmio range, but
@@ -156,8 +157,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 	pgprot_t pgprot = PAGE_KERNEL;
 	int error, nid, is_ram;
 
-	if (!pgmap->ref || !pgmap->kill)
+	if (!pgmap->ref || !pgmap->kill || !pgmap->cleanup) {
+		WARN(1, "Missing reference count teardown definition\n");
 		return ERR_PTR(-EINVAL);
+	}
 
 	align_start = res->start & ~(SECTION_SIZE - 1);
 	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
@@ -168,14 +171,16 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 	if (conflict_pgmap) {
 		dev_WARN(dev, "Conflicting mapping in same section\n");
 		put_dev_pagemap(conflict_pgmap);
-		return ERR_PTR(-ENOMEM);
+		error = -ENOMEM;
+		goto err_array;
 	}
 
 	conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
 	if (conflict_pgmap) {
 		dev_WARN(dev, "Conflicting mapping in same section\n");
 		put_dev_pagemap(conflict_pgmap);
-		return ERR_PTR(-ENOMEM);
+		error = -ENOMEM;
+		goto err_array;
 	}
 
 	is_ram = region_intersects(align_start, align_size,
@@ -267,6 +272,8 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 	pgmap_array_delete(res);
 err_array:
 	pgmap->kill(pgmap->ref);
+	pgmap->cleanup(pgmap->ref);
+
 	return ERR_PTR(error);
 }
 EXPORT_SYMBOL_GPL(devm_memremap_pages);
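Taken together, the release path now quiesces pages in a strict order.
The following is only a condensed, annotated restatement of the hunk
above, not an additional change:

static void devm_memremap_pages_release(void *data)
{
	struct dev_pagemap *pgmap = data;
	unsigned long pfn;

	pgmap->kill(pgmap->ref);		/* 1. refuse new page references */
	for_each_device_pfn(pfn, pgmap)
		put_page(pfn_to_page(pfn));	/* 2. drop the init-time references */
	pgmap->cleanup(pgmap->ref);		/* 3. wait for the final page put */

	/* 4. pages are dead and unused; arch_remove_memory() is now safe */
	/* ... remainder of the existing release path ... */
}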
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -1354,9 +1354,8 @@ static void hmm_devmem_ref_release(struct percpu_ref *ref)
 	complete(&devmem->completion);
 }
 
-static void hmm_devmem_ref_exit(void *data)
+static void hmm_devmem_ref_exit(struct percpu_ref *ref)
 {
-	struct percpu_ref *ref = data;
 	struct hmm_devmem *devmem;
 
 	devmem = container_of(ref, struct hmm_devmem, ref);
@@ -1433,10 +1432,6 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
 	if (ret)
 		return ERR_PTR(ret);
 
-	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
-	if (ret)
-		return ERR_PTR(ret);
-
 	size = ALIGN(size, PA_SECTION_SIZE);
 	addr = min((unsigned long)iomem_resource.end,
 		   (1UL << MAX_PHYSMEM_BITS) - 1);
@@ -1475,6 +1470,7 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
 	devmem->pagemap.ref = &devmem->ref;
 	devmem->pagemap.data = devmem;
 	devmem->pagemap.kill = hmm_devmem_ref_kill;
+	devmem->pagemap.cleanup = hmm_devmem_ref_exit;
 
 	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
 	if (IS_ERR(result))
@@ -1512,11 +1508,6 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
 	if (ret)
 		return ERR_PTR(ret);
 
-	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
-			&devmem->ref);
-	if (ret)
-		return ERR_PTR(ret);
-
 	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
 	devmem->pfn_last = devmem->pfn_first +
 			   (resource_size(devmem->resource) >> PAGE_SHIFT);
@@ -1529,6 +1520,7 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
 	devmem->pagemap.ref = &devmem->ref;
 	devmem->pagemap.data = devmem;
 	devmem->pagemap.kill = hmm_devmem_ref_kill;
+	devmem->pagemap.cleanup = hmm_devmem_ref_exit;
 
 	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
 	if (IS_ERR(result))
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -100,7 +100,9 @@ static void nfit_test_kill(void *_pgmap)
 {
 	struct dev_pagemap *pgmap = _pgmap;
 
+	WARN_ON(!pgmap || !pgmap->ref || !pgmap->kill || !pgmap->cleanup);
 	pgmap->kill(pgmap->ref);
+	pgmap->cleanup(pgmap->ref);
 }
 
 void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)