libnvdimm, pmem: use devm_add_action to release bdev resources

Register a callback to clean up the request_queue and put the gendisk at
driver disable time.

Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
This commit is contained in:
Dan Williams 2016-03-17 20:24:31 -07:00
parent 9d90725ddc
commit 030b99e39c
1 changed file with 39 additions and 49 deletions

View File

@@ -198,6 +198,17 @@ static const struct block_device_operations pmem_fops = {
.revalidate_disk = nvdimm_revalidate_disk,
};
/* devm action: shut down and release the pmem request_queue at detach. */
static void pmem_release_queue(void *data)
{
	blk_cleanup_queue(data);
}
/*
 * devm action: unregister the gendisk and drop its reference at detach.
 * Made static: it is a file-local callback registered via devm_add_action,
 * matching the linkage of pmem_release_queue.
 */
static void pmem_release_disk(void *disk)
{
	del_gendisk(disk);
	put_disk(disk);
}
static struct pmem_device *pmem_alloc(struct device *dev,
struct resource *res, int id)
{
@@ -234,25 +245,22 @@ static struct pmem_device *pmem_alloc(struct device *dev,
pmem->phys_addr, pmem->size,
ARCH_MEMREMAP_PMEM);
if (IS_ERR(pmem->virt_addr)) {
/*
* At release time the queue must be dead before
* devm_memremap_pages is unwound
*/
if (devm_add_action(dev, pmem_release_queue, q)) {
blk_cleanup_queue(q);
return (void __force *) pmem->virt_addr;
return ERR_PTR(-ENOMEM);
}
if (IS_ERR(pmem->virt_addr))
return (void __force *) pmem->virt_addr;
pmem->pmem_queue = q;
return pmem;
}
/*
 * Tear down the pmem block device: unregister the gendisk, drop its
 * reference, then kill the request_queue.  The order matters — the disk
 * must be unregistered and released before the queue it points at is
 * cleaned up.
 */
static void pmem_detach_disk(struct pmem_device *pmem)
{
/* No-op if the disk was never attached. */
if (!pmem->pmem_disk)
return;
del_gendisk(pmem->pmem_disk);
put_disk(pmem->pmem_disk);
blk_cleanup_queue(pmem->pmem_queue);
}
static int pmem_attach_disk(struct device *dev,
struct nd_namespace_common *ndns, struct pmem_device *pmem)
{
@@ -269,8 +277,10 @@ static int pmem_attach_disk(struct device *dev,
pmem->pmem_queue->queuedata = pmem;
disk = alloc_disk_node(0, nid);
if (!disk) {
blk_cleanup_queue(pmem->pmem_queue);
if (!disk)
return -ENOMEM;
if (devm_add_action(dev, pmem_release_disk, disk)) {
put_disk(disk);
return -ENOMEM;
}
@@ -427,15 +437,6 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
}
/* Undo a pfn-mode attach: look up the pmem device and tear down its disk. */
static void nvdimm_namespace_detach_pfn(struct nd_pfn *nd_pfn)
{
	struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);

	pmem_detach_disk(pmem);
}
/*
* We hotplug memory at section granularity, pad the reserved area from
* the previous section base to the namespace base address.
@@ -458,7 +459,6 @@ static unsigned long init_altmap_reserve(resource_size_t base)
static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn)
{
int rc;
struct resource res;
struct request_queue *q;
struct pmem_device *pmem;
@@ -495,35 +495,33 @@ static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn)
altmap = & __altmap;
altmap->free = PHYS_PFN(pmem->data_offset - SZ_8K);
altmap->alloc = 0;
} else {
rc = -ENXIO;
goto err;
}
} else
return -ENXIO;
/* establish pfn range for lookup, and switch to direct map */
q = pmem->pmem_queue;
memcpy(&res, &nsio->res, sizeof(res));
res.start += start_pad;
res.end -= end_trunc;
devm_remove_action(dev, pmem_release_queue, q);
devm_memunmap(dev, (void __force *) pmem->virt_addr);
pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &res,
&q->q_usage_counter, altmap);
pmem->pfn_flags |= PFN_MAP;
if (IS_ERR(pmem->virt_addr)) {
rc = PTR_ERR(pmem->virt_addr);
goto err;
/*
* At release time the queue must be dead before
* devm_memremap_pages is unwound
*/
if (devm_add_action(dev, pmem_release_queue, q)) {
blk_cleanup_queue(q);
return -ENOMEM;
}
if (IS_ERR(pmem->virt_addr))
return PTR_ERR(pmem->virt_addr);
/* attach pmem disk in "pfn-mode" */
rc = pmem_attach_disk(dev, ndns, pmem);
if (rc)
goto err;
return rc;
err:
nvdimm_namespace_detach_pfn(nd_pfn);
return rc;
return pmem_attach_disk(dev, ndns, pmem);
}
static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
@@ -565,8 +563,8 @@ static int nd_pmem_probe(struct device *dev)
if (is_nd_btt(dev)) {
/* btt allocates its own request_queue */
devm_remove_action(dev, pmem_release_queue, pmem->pmem_queue);
blk_cleanup_queue(pmem->pmem_queue);
pmem->pmem_queue = NULL;
return nvdimm_namespace_attach_btt(ndns);
}
@@ -579,7 +577,6 @@ static int nd_pmem_probe(struct device *dev)
* We'll come back as either btt-pmem, or pfn-pmem, so
* drop the queue allocation for now.
*/
blk_cleanup_queue(pmem->pmem_queue);
return -ENXIO;
}
@@ -588,15 +585,8 @@ static int nd_pmem_remove(struct device *dev)
/*
 * Driver-core remove: tear down whichever personality (btt, pfn, or raw
 * pmem disk) the probe path set up for this device.
 */
static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev)) {
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
		return 0;
	}
	if (is_nd_pfn(dev)) {
		nvdimm_namespace_detach_pfn(to_nd_pfn(dev));
		return 0;
	}
	pmem_detach_disk(pmem);
	return 0;
}