2019-05-29 22:18:09 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2015-06-01 03:02:11 +08:00
|
|
|
/*
|
|
|
|
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
|
|
|
|
*/
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/device.h>
|
2016-10-06 05:04:15 +08:00
|
|
|
#include <linux/sort.h>
|
2015-06-01 03:02:11 +08:00
|
|
|
#include <linux/slab.h>
|
2016-09-20 07:04:21 +08:00
|
|
|
#include <linux/list.h>
|
2015-06-01 03:02:11 +08:00
|
|
|
#include <linux/nd.h>
|
2015-06-18 05:14:46 +08:00
|
|
|
#include "nd-core.h"
|
2017-01-14 12:36:58 +08:00
|
|
|
#include "pmem.h"
|
2015-06-01 03:02:11 +08:00
|
|
|
#include "nd.h"
|
|
|
|
|
|
|
|
/* Device release callback for raw I/O namespaces: free the container. */
static void namespace_io_release(struct device *dev)
{
	kfree(to_nd_namespace_io(dev));
}
|
|
|
|
|
2015-06-18 05:14:46 +08:00
|
|
|
static void namespace_pmem_release(struct device *dev)
|
|
|
|
{
|
|
|
|
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
|
2016-10-07 14:13:15 +08:00
|
|
|
struct nd_region *nd_region = to_nd_region(dev->parent);
|
2015-06-18 05:14:46 +08:00
|
|
|
|
2016-10-07 14:13:15 +08:00
|
|
|
if (nspm->id >= 0)
|
|
|
|
ida_simple_remove(&nd_region->ns_ida, nspm->id);
|
2015-06-18 05:14:46 +08:00
|
|
|
kfree(nspm->alt_name);
|
|
|
|
kfree(nspm->uuid);
|
|
|
|
kfree(nspm);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void namespace_blk_release(struct device *dev)
|
|
|
|
{
|
2015-05-02 01:34:01 +08:00
|
|
|
struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
|
|
|
|
struct nd_region *nd_region = to_nd_region(dev->parent);
|
|
|
|
|
|
|
|
if (nsblk->id >= 0)
|
|
|
|
ida_simple_remove(&nd_region->ns_ida, nsblk->id);
|
|
|
|
kfree(nsblk->alt_name);
|
|
|
|
kfree(nsblk->uuid);
|
|
|
|
kfree(nsblk->res);
|
|
|
|
kfree(nsblk);
|
2015-06-18 05:14:46 +08:00
|
|
|
}
|
|
|
|
|
2017-01-25 03:24:07 +08:00
|
|
|
/* Raw (label-less) namespace device type; matched by is_namespace_io(). */
static const struct device_type namespace_io_device_type = {
	.name = "nd_namespace_io",
	.release = namespace_io_release,
};
|
|
|
|
|
2017-01-25 03:24:07 +08:00
|
|
|
/* PMEM namespace device type; matched by is_namespace_pmem(). */
static const struct device_type namespace_pmem_device_type = {
	.name = "nd_namespace_pmem",
	.release = namespace_pmem_release,
};
|
|
|
|
|
2017-01-25 03:24:07 +08:00
|
|
|
/* BLK namespace device type; matched by is_namespace_blk(). */
static const struct device_type namespace_blk_device_type = {
	.name = "nd_namespace_blk",
	.release = namespace_blk_release,
};
|
|
|
|
|
2016-10-06 05:04:15 +08:00
|
|
|
static bool is_namespace_pmem(const struct device *dev)
|
2015-06-18 05:14:46 +08:00
|
|
|
{
|
|
|
|
return dev ? dev->type == &namespace_pmem_device_type : false;
|
|
|
|
}
|
|
|
|
|
2016-10-06 05:04:15 +08:00
|
|
|
static bool is_namespace_blk(const struct device *dev)
|
2015-06-18 05:14:46 +08:00
|
|
|
{
|
|
|
|
return dev ? dev->type == &namespace_blk_device_type : false;
|
|
|
|
}
|
|
|
|
|
2016-10-06 05:04:15 +08:00
|
|
|
static bool is_namespace_io(const struct device *dev)
|
2015-06-18 05:14:46 +08:00
|
|
|
{
|
|
|
|
return dev ? dev->type == &namespace_io_device_type : false;
|
|
|
|
}
|
|
|
|
|
2016-01-06 10:37:23 +08:00
|
|
|
static int is_uuid_busy(struct device *dev, void *data)
|
|
|
|
{
|
|
|
|
u8 *uuid1 = data, *uuid2 = NULL;
|
|
|
|
|
|
|
|
if (is_namespace_pmem(dev)) {
|
|
|
|
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
|
|
|
|
|
|
|
|
uuid2 = nspm->uuid;
|
|
|
|
} else if (is_namespace_blk(dev)) {
|
|
|
|
struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
|
|
|
|
|
|
|
|
uuid2 = nsblk->uuid;
|
|
|
|
} else if (is_nd_btt(dev)) {
|
|
|
|
struct nd_btt *nd_btt = to_nd_btt(dev);
|
|
|
|
|
|
|
|
uuid2 = nd_btt->uuid;
|
|
|
|
} else if (is_nd_pfn(dev)) {
|
|
|
|
struct nd_pfn *nd_pfn = to_nd_pfn(dev);
|
|
|
|
|
|
|
|
uuid2 = nd_pfn->uuid;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (uuid2 && memcmp(uuid1, uuid2, NSLABEL_UUID_LEN) == 0)
|
|
|
|
return -EBUSY;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int is_namespace_uuid_busy(struct device *dev, void *data)
|
|
|
|
{
|
2017-05-30 14:12:19 +08:00
|
|
|
if (is_nd_region(dev))
|
2016-01-06 10:37:23 +08:00
|
|
|
return device_for_each_child(dev, data, is_uuid_busy);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* nd_is_uuid_unique - verify that no other namespace has @uuid
|
|
|
|
* @dev: any device on a nvdimm_bus
|
|
|
|
* @uuid: uuid to check
|
|
|
|
*/
|
|
|
|
bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
|
|
|
|
{
|
|
|
|
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
|
|
|
|
|
|
|
|
if (!nvdimm_bus)
|
|
|
|
return false;
|
|
|
|
WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
|
|
|
|
if (device_for_each_child(&nvdimm_bus->dev, uuid,
|
|
|
|
is_namespace_uuid_busy) != 0)
|
|
|
|
return false;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2015-08-25 07:20:23 +08:00
|
|
|
bool pmem_should_map_pages(struct device *dev)
|
|
|
|
{
|
|
|
|
struct nd_region *nd_region = to_nd_region(dev->parent);
|
2019-01-25 09:33:06 +08:00
|
|
|
struct nd_namespace_common *ndns = to_ndns(dev);
|
2016-03-04 01:38:00 +08:00
|
|
|
struct nd_namespace_io *nsio;
|
2015-08-25 07:20:23 +08:00
|
|
|
|
|
|
|
if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (is_nd_pfn(dev) || is_nd_btt(dev))
|
|
|
|
return false;
|
|
|
|
|
2019-01-25 09:33:06 +08:00
|
|
|
if (ndns->force_raw)
|
|
|
|
return false;
|
|
|
|
|
2016-03-04 01:38:00 +08:00
|
|
|
nsio = to_nd_namespace_io(dev);
|
|
|
|
if (region_intersects(nsio->res.start, resource_size(&nsio->res),
|
|
|
|
IORESOURCE_SYSTEM_RAM,
|
|
|
|
IORES_DESC_NONE) == REGION_MIXED)
|
|
|
|
return false;
|
|
|
|
|
2015-08-25 07:20:23 +08:00
|
|
|
return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(pmem_should_map_pages);
|
|
|
|
|
2017-06-04 11:12:07 +08:00
|
|
|
unsigned int pmem_sector_size(struct nd_namespace_common *ndns)
|
|
|
|
{
|
|
|
|
if (is_namespace_pmem(&ndns->dev)) {
|
|
|
|
struct nd_namespace_pmem *nspm;
|
|
|
|
|
|
|
|
nspm = to_nd_namespace_pmem(&ndns->dev);
|
|
|
|
if (nspm->lbasize == 0 || nspm->lbasize == 512)
|
|
|
|
/* default */;
|
|
|
|
else if (nspm->lbasize == 4096)
|
|
|
|
return 4096;
|
|
|
|
else
|
|
|
|
dev_WARN(&ndns->dev, "unsupported sector size: %ld\n",
|
|
|
|
nspm->lbasize);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* There is no namespace label (is_namespace_io()), or the label
|
|
|
|
* indicates the default sector size.
|
|
|
|
*/
|
|
|
|
return 512;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(pmem_sector_size);
|
|
|
|
|
nd_btt: atomic sector updates
BTT stands for Block Translation Table, and is a way to provide power
fail sector atomicity semantics for block devices that have the ability
to perform byte granularity IO. It relies on the capability of libnvdimm
namespace devices to do byte aligned IO.
The BTT works as a stacked blocked device, and reserves a chunk of space
from the backing device for its accounting metadata. It is a bio-based
driver because all IO is done synchronously, and there is no queuing or
asynchronous completions at either the device or the driver level.
The BTT uses 'lanes' to index into various 'on-disk' data structures,
and lanes also act as a synchronization mechanism in case there are more
CPUs than available lanes. We did a comparison between two lane lock
strategies - first where we kept an atomic counter around that tracked
which was the last lane that was used, and 'our' lane was determined by
atomically incrementing that. That way, for the nr_cpus > nr_lanes case,
theoretically, no CPU would be blocked waiting for a lane. The other
strategy was to use the cpu number we're scheduled on to and hash it to
a lane number. Theoretically, this could block an IO that could've
otherwise run using a different, free lane. But some fio workloads
showed that the direct cpu -> lane hash performed faster than tracking
'last lane' - my reasoning is the cache thrash caused by moving the
atomic variable made that approach slower than simply waiting out the
in-progress IO. This supports the conclusion that the driver can be a
very simple bio-based one that does synchronous IOs instead of queuing.
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Boaz Harrosh <boaz@plexistor.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jens Axboe <axboe@fb.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Neil Brown <neilb@suse.de>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
[jmoyer: fix nmi watchdog timeout in btt_map_init]
[jmoyer: move btt initialization to module load path]
[jmoyer: fix memory leak in the btt initialization path]
[jmoyer: Don't overwrite corrupted arenas]
Signed-off-by: Vishal Verma <vishal.l.verma@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2015-06-25 16:20:32 +08:00
|
|
|
/*
 * nvdimm_namespace_disk_name - format the disk name for a namespace
 * @ndns: namespace to name
 * @name: caller-provided output buffer (caller guarantees capacity)
 *
 * Produces "pmem<region>", "pmem<region>.<nsid>" or
 * "ndblk<region>.<nsid>", with an "s" suffix appended when the
 * namespace is claimed by a BTT (sector-atomic) front end.
 * Returns @name, or NULL for an unrecognized namespace type.
 */
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name)
{
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
	const char *suffix = NULL;

	if (ndns->claim && is_nd_btt(ndns->claim))
		suffix = "s";

	if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
		int nsidx = 0;

		/* raw io namespaces have no per-region id; only pmem does */
		if (is_namespace_pmem(&ndns->dev)) {
			struct nd_namespace_pmem *nspm;

			nspm = to_nd_namespace_pmem(&ndns->dev);
			nsidx = nspm->id;
		}

		/* id 0 is the region's default namespace: no ".0" suffix */
		if (nsidx)
			sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx,
					suffix ? suffix : "");
		else
			sprintf(name, "pmem%d%s", nd_region->id,
					suffix ? suffix : "");
	} else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
				suffix ? suffix : "");
	} else {
		return NULL;
	}

	return name;
}
EXPORT_SYMBOL(nvdimm_namespace_disk_name);
|
|
|
|
|
2015-07-30 04:58:09 +08:00
|
|
|
const u8 *nd_dev_to_uuid(struct device *dev)
|
|
|
|
{
|
|
|
|
static const u8 null_uuid[16];
|
|
|
|
|
|
|
|
if (!dev)
|
|
|
|
return null_uuid;
|
|
|
|
|
|
|
|
if (is_namespace_pmem(dev)) {
|
|
|
|
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
|
|
|
|
|
|
|
|
return nspm->uuid;
|
|
|
|
} else if (is_namespace_blk(dev)) {
|
|
|
|
struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
|
|
|
|
|
|
|
|
return nsblk->uuid;
|
|
|
|
} else
|
|
|
|
return null_uuid;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(nd_dev_to_uuid);
|
|
|
|
|
2015-06-01 03:02:11 +08:00
|
|
|
static ssize_t nstype_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct nd_region *nd_region = to_nd_region(dev->parent);
|
|
|
|
|
|
|
|
return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RO(nstype);
|
|
|
|
|
2015-06-18 05:14:46 +08:00
|
|
|
/*
 * Replace the namespace's alt_name with a trimmed copy of @buf.
 * Caller must hold the locks taken by alt_name_store(). Returns the
 * consumed length on success, or -ENXIO (not a pmem/blk namespace),
 * -EBUSY (namespace active or claimed), -EINVAL (name too long),
 * -ENOMEM.
 */
static ssize_t __alt_name_store(struct device *dev, const char *buf,
		const size_t len)
{
	char *input, *pos, *alt_name, **ns_altname;
	ssize_t rc;

	/* locate the alt_name slot for this namespace flavor */
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = &nspm->alt_name;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_altname = &nsblk->alt_name;
	} else
		return -ENXIO;

	/* no renames while the namespace is driver-bound or claimed */
	if (dev->driver || to_ndns(dev)->claim)
		return -EBUSY;

	input = kstrndup(buf, len, GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	/* trim surrounding whitespace; +1 accounts for the NUL */
	pos = strim(input);
	if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
		rc = -EINVAL;
		goto out;
	}

	alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
	if (!alt_name) {
		rc = -ENOMEM;
		goto out;
	}
	/* new buffer allocated before old is freed: no loss on failure */
	kfree(*ns_altname);
	*ns_altname = alt_name;
	sprintf(*ns_altname, "%s", pos);
	rc = len;

out:
	kfree(input);
	return rc;
}
|
|
|
|
|
2015-05-02 01:34:01 +08:00
|
|
|
static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
|
|
|
|
{
|
2015-06-25 16:20:04 +08:00
|
|
|
struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
|
2015-05-02 01:34:01 +08:00
|
|
|
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
|
|
|
|
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
|
|
|
|
struct nd_label_id label_id;
|
|
|
|
resource_size_t size = 0;
|
|
|
|
struct resource *res;
|
|
|
|
|
|
|
|
if (!nsblk->uuid)
|
|
|
|
return 0;
|
|
|
|
nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
|
|
|
|
for_each_dpa_resource(ndd, res)
|
|
|
|
if (strcmp(res->name, label_id.id) == 0)
|
|
|
|
size += resource_size(res);
|
|
|
|
return size;
|
|
|
|
}
|
|
|
|
|
2015-06-25 16:21:02 +08:00
|
|
|
/*
 * Verify that the dimm's DPA resources for @nsblk agree with the
 * namespace's cached view (nsblk->res[] / nsblk->num_resources):
 * every matching resource must have its label update acknowledged,
 * the counts must match, and every cached resource pointer must still
 * be live on the dimm. Caller must hold the nvdimm bus lock (see
 * nd_namespace_blk_validate()).
 */
static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	struct resource *res;
	int count, i;

	/* not fully configured, or dimm data unavailable */
	if (!nsblk->uuid || !nsblk->lbasize || !ndd)
		return false;

	count = 0;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		/*
		 * Resources with unacknowledged adjustments indicate a
		 * failure to update labels
		 */
		if (res->flags & DPA_RESOURCE_ADJUSTED)
			return false;
		count++;
	}

	/* These values match after a successful label update */
	if (count != nsblk->num_resources)
		return false;

	/* every cached resource must still exist on the dimm */
	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *found = NULL;

		for_each_dpa_resource(ndd, res)
			if (res == nsblk->res[i]) {
				found = res;
				break;
			}
		/* stale resource */
		if (!found)
			return false;
	}

	return true;
}
|
|
|
|
|
|
|
|
/*
 * Locked wrapper around __nd_namespace_blk_validate().
 *
 * NOTE(review): __nd_namespace_blk_validate() returns bool, so despite
 * the resource_size_t return type this yields 0 (invalid) or 1 (valid),
 * not a byte count — confirm callers only test for non-zero.
 */
resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
	resource_size_t size;

	nvdimm_bus_lock(&nsblk->common.dev);
	size = __nd_namespace_blk_validate(nsblk);
	nvdimm_bus_unlock(&nsblk->common.dev);

	return size;
}
EXPORT_SYMBOL(nd_namespace_blk_validate);
|
|
|
|
|
|
|
|
|
2015-05-31 00:36:02 +08:00
|
|
|
/*
 * Push the current configuration of namespace @dev out to the labels
 * on @nd_region's dimms. A no-op (returning 0) when the namespace is
 * active/claimed or not yet configured enough to describe; a size of 0
 * with a uuid set requests deletion of the existing allocation.
 */
static int nd_namespace_label_update(struct nd_region *nd_region,
		struct device *dev)
{
	dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
			"namespace must be idle during label update\n");
	if (dev->driver || to_ndns(dev)->claim)
		return 0;

	/*
	 * Only allow label writes that will result in a valid namespace
	 * or deletion of an existing namespace.
	 */
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		resource_size_t size = resource_size(&nspm->nsio.res);

		if (size == 0 && nspm->uuid)
			/* delete allocation */;
		else if (!nspm->uuid)
			return 0;

		return nd_pmem_namespace_label_update(nd_region, nspm, size);
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
		resource_size_t size = nd_namespace_blk_size(nsblk);

		if (size == 0 && nsblk->uuid)
			/* delete allocation */;
		else if (!nsblk->uuid || !nsblk->lbasize)
			return 0;

		return nd_blk_namespace_label_update(nd_region, nsblk, size);
	} else
		return -ENXIO;
}
|
|
|
|
|
2015-06-18 05:14:46 +08:00
|
|
|
/*
 * sysfs "alt_name" store: update the namespace alt_name and propagate
 * the change to the on-dimm labels. Returns @len on success or a
 * negative errno.
 */
static ssize_t alt_name_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc;

	/* lock order: device lock nests outside the nvdimm bus lock */
	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __alt_name_store(dev, buf, len);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc < 0 ? rc : len;
}
|
|
|
|
|
|
|
|
static ssize_t alt_name_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
char *ns_altname;
|
|
|
|
|
|
|
|
if (is_namespace_pmem(dev)) {
|
|
|
|
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
|
|
|
|
|
|
|
|
ns_altname = nspm->alt_name;
|
|
|
|
} else if (is_namespace_blk(dev)) {
|
2015-05-02 01:34:01 +08:00
|
|
|
struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
|
|
|
|
|
|
|
|
ns_altname = nsblk->alt_name;
|
2015-06-18 05:14:46 +08:00
|
|
|
} else
|
|
|
|
return -ENXIO;
|
|
|
|
|
|
|
|
return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RW(alt_name);
|
|
|
|
|
|
|
|
/*
 * Release @n bytes from the tail of the @label_id allocation on
 * @nd_mapping: whole trailing resources are deleted until the remaining
 * amount fits within a single resource, which is then shrunk in place.
 * BLK allocations shrink from the low end (keeping BLK at high DPA);
 * PMEM shrinks from the high end. Returns 0 or adjust_resource() errno.
 */
static int scan_free(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	int rc = 0;

	while (n) {
		struct resource *res, *last;
		resource_size_t new_start;

		/* find the last (highest DPA) resource with this label id */
		last = NULL;
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id->id) == 0)
				last = res;
		res = last;
		if (!res)
			return 0;

		if (n >= resource_size(res)) {
			n -= resource_size(res);
			nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
			nvdimm_free_dpa(ndd, res);
			/* retry with last resource deleted */
			continue;
		}

		/*
		 * Keep BLK allocations relegated to high DPA as much as
		 * possible
		 */
		if (is_blk)
			new_start = res->start + n;
		else
			new_start = res->start;

		rc = adjust_resource(res, new_start, resource_size(res) - n);
		if (rc == 0)
			/* mark for acknowledgment by the next label update */
			res->flags |= DPA_RESOURCE_ADJUSTED;
		nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
		break;
	}

	return rc;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* shrink_dpa_allocation - for each dimm in region free n bytes for label_id
|
|
|
|
* @nd_region: the set of dimms to reclaim @n bytes from
|
|
|
|
* @label_id: unique identifier for the namespace consuming this dpa range
|
|
|
|
* @n: number of bytes per-dimm to release
|
|
|
|
*
|
|
|
|
* Assumes resources are ordered. Starting from the end try to
|
|
|
|
* adjust_resource() the allocation to @n, but if @n is larger than the
|
|
|
|
* allocation delete it and find the 'new' last allocation in the label
|
|
|
|
* set.
|
|
|
|
*/
|
|
|
|
static int shrink_dpa_allocation(struct nd_region *nd_region,
|
|
|
|
struct nd_label_id *label_id, resource_size_t n)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < nd_region->ndr_mappings; i++) {
|
|
|
|
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
rc = scan_free(nd_region, nd_mapping, label_id, n);
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Create the first DPA resource for @label_id on @nd_mapping, @n bytes
 * long. BLK allocations are carved from the top of the mapping, PMEM
 * from the bottom. Returns the number of bytes that could NOT be
 * allocated: 0 on success, @n when the range was busy.
 */
static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
		struct nd_region *nd_region, struct nd_mapping *nd_mapping,
		resource_size_t n)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	bool blk_alloc = strncmp(label_id->id, "blk", 3) == 0;
	resource_size_t start;
	struct resource *res;
	int rc = 0;

	/* allocate blk from highest dpa first */
	start = nd_mapping->start;
	if (blk_alloc)
		start = nd_mapping->start + nd_mapping->size - n;

	/* first resource allocation for this label-id or dimm */
	res = nvdimm_allocate_dpa(ndd, label_id, start, n);
	if (!res)
		rc = -EBUSY;

	nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
	return rc ? n : 0;
}
|
|
|
|
|
2016-10-05 07:09:59 +08:00
|
|
|
|
|
|
|
/**
 * space_valid() - validate free dpa space against constraints
 * @nd_region: hosting region of the free space
 * @ndd: dimm device data for debug
 * @label_id: namespace id to allocate space
 * @prev: potential allocation that precedes free space
 * @next: allocation that follows the given free space range
 * @exist: first allocation with same id in the mapping
 * @n: range that must satisfied for pmem allocations
 * @valid: free space range to validate
 *
 * BLK-space is valid as long as it does not precede a PMEM
 * allocation in a given region. PMEM-space must be contiguous
 * and adjacent to an existing allocation (if one exists). If
 * reserving PMEM any space is valid.
 *
 * On rejection @valid is truncated to zero size (end < start).
 */
static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, struct resource *prev,
		struct resource *next, struct resource *exist,
		resource_size_t n, struct resource *valid)
{
	bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;

	/* empty or inverted range is never valid */
	if (valid->start >= valid->end)
		goto invalid;

	/* reservations accept any non-empty range */
	if (is_reserve)
		return;

	if (!is_pmem) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_bus *nvdimm_bus;
		struct blk_alloc_info info = {
			.nd_mapping = nd_mapping,
			.available = nd_mapping->size,
			.res = valid,
		};

		/* BLK case: let alias_dpa_busy() trim @valid in place */
		WARN_ON(!is_nd_blk(&nd_region->dev));
		nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
		device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
		return;
	}

	/* allocation needs to be contiguous, so this is all or nothing */
	if (resource_size(valid) < n)
		goto invalid;

	/* we've got all the space we need and no existing allocation */
	if (!exist)
		return;

	/* allocation needs to be contiguous with the existing namespace */
	if (valid->start == exist->end + 1
			|| valid->end == exist->start - 1)
		return;

invalid:
	/* truncate @valid size to 0 */
	valid->end = valid->start - 1;
}
|
|
|
|
|
|
|
|
/*
 * Position of a free-space candidate relative to the resource being
 * scanned: before the first allocation, between two allocations, or
 * after the last one.  ALLOC_ERR means "no placement found".
 */
enum alloc_loc {
	ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};
|
|
|
|
|
|
|
|
/*
 * Walk the dimm's dpa resource list and carve out up to @n bytes for
 * @label_id, growing adjacent same-id resources where possible and
 * inserting new resources otherwise.  Returns the number of bytes that
 * could NOT be allocated (0 on complete success).
 */
static resource_size_t scan_allocate(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *exist = NULL, valid;
	const resource_size_t to_allocate = n;
	int first;

	/* remember the last resource already owned by this label-id */
	for_each_dpa_resource(ndd, res)
		if (strcmp(label_id->id, res->name) == 0)
			exist = res;

	valid.start = nd_mapping->start;
	valid.end = mapping_end;
	valid.name = "free space";
 retry:
	first = 0;
	for_each_dpa_resource(ndd, res) {
		struct resource *next = res->sibling, *new_res = NULL;
		resource_size_t allocate, available = 0;
		enum alloc_loc loc = ALLOC_ERR;
		const char *action;
		int rc = 0;

		/* ignore resources outside this nd_mapping */
		if (res->start > mapping_end)
			continue;
		if (res->end < nd_mapping->start)
			continue;

		/* space at the beginning of the mapping */
		if (!first++ && res->start > nd_mapping->start) {
			valid.start = nd_mapping->start;
			valid.end = res->start - 1;
			space_valid(nd_region, ndd, label_id, NULL, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_BEFORE;
		}

		/* space between allocations */
		if (!loc && next) {
			valid.start = res->start + resource_size(res);
			valid.end = min(mapping_end, next->start - 1);
			space_valid(nd_region, ndd, label_id, res, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_MID;
		}

		/* space at the end of the mapping */
		if (!loc && !next) {
			valid.start = res->start + resource_size(res);
			valid.end = mapping_end;
			space_valid(nd_region, ndd, label_id, res, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_AFTER;
		}

		if (!loc || !available)
			continue;
		allocate = min(available, n);
		switch (loc) {
		case ALLOC_BEFORE:
			if (strcmp(res->name, label_id->id) == 0) {
				/* adjust current resource up */
				rc = adjust_resource(res, res->start - allocate,
						resource_size(res) + allocate);
				action = "cur grow up";
			} else
				action = "allocate";
			break;
		case ALLOC_MID:
			if (strcmp(next->name, label_id->id) == 0) {
				/* adjust next resource up */
				rc = adjust_resource(next, next->start
						- allocate, resource_size(next)
						+ allocate);
				new_res = next;
				action = "next grow up";
			} else if (strcmp(res->name, label_id->id) == 0) {
				action = "grow down";
			} else
				action = "allocate";
			break;
		case ALLOC_AFTER:
			if (strcmp(res->name, label_id->id) == 0)
				action = "grow down";
			else
				action = "allocate";
			break;
		default:
			/* unreachable: loc was checked non-zero above */
			return n;
		}

		if (strcmp(action, "allocate") == 0) {
			/* BLK allocate bottom up */
			if (!is_pmem)
				valid.start += available - allocate;

			new_res = nvdimm_allocate_dpa(ndd, label_id,
					valid.start, allocate);
			if (!new_res)
				rc = -EBUSY;
		} else if (strcmp(action, "grow down") == 0) {
			/* adjust current resource down */
			rc = adjust_resource(res, res->start, resource_size(res)
					+ allocate);
			if (rc == 0)
				res->flags |= DPA_RESOURCE_ADJUSTED;
		}

		if (!new_res)
			new_res = res;

		nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
				action, loc, rc);

		if (rc)
			return n;

		n -= allocate;
		if (n) {
			/*
			 * Retry scan with newly inserted resources.
			 * For example, if we did an ALLOC_BEFORE
			 * insertion there may also have been space
			 * available for an ALLOC_AFTER insertion, so we
			 * need to check this same resource again
			 */
			goto retry;
		} else
			return 0;
	}

	/*
	 * If we allocated nothing in the BLK case it may be because we are in
	 * an initial "pmem-reserve pass". Only do an initial BLK allocation
	 * when none of the DPA space is reserved.
	 */
	if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
		return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
	return n;
}
|
|
|
|
|
2015-05-02 01:34:01 +08:00
|
|
|
/*
 * Coalesce physically adjacent dpa resources that carry the same
 * label-id into single resources.  PMEM ids are skipped: their
 * allocations are kept contiguous by construction.  Restarts the scan
 * after every merge because the sibling list was just modified.
 */
static int merge_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;

	if (strncmp("pmem", label_id->id, 4) == 0)
		return 0;
 retry:
	for_each_dpa_resource(ndd, res) {
		int rc;
		struct resource *next = res->sibling;
		resource_size_t end = res->start + resource_size(res);

		/* only merge same-id neighbors that touch exactly */
		if (!next || strcmp(res->name, label_id->id) != 0
				|| strcmp(next->name, label_id->id) != 0
				|| end != next->start)
			continue;
		end += resource_size(next);
		nvdimm_free_dpa(ndd, next);
		rc = adjust_resource(res, res->start, end - res->start);
		nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
		if (rc)
			return rc;
		res->flags |= DPA_RESOURCE_ADJUSTED;
		goto retry;
	}

	return 0;
}
|
|
|
|
|
2018-07-25 05:07:57 +08:00
|
|
|
/*
 * device_for_each_child() callback: for the region @dev, claim all
 * free PMEM capacity on the dimm passed in @data under the sentinel
 * "pmem-reserve" label-id, so a subsequent BLK scan cannot consume it.
 * Returns 0 to continue the walk, negative errno on reserve failure.
 */
int __reserve_free_pmem(struct device *dev, void *data)
{
	struct nvdimm *nvdimm = data;
	struct nd_region *nd_region;
	struct nd_label_id label_id;
	int i;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region->ndr_mappings == 0)
		return 0;

	memset(&label_id, 0, sizeof(label_id));
	strcat(label_id.id, "pmem-reserve");
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t n, rem = 0;

		/* only the mapping backed by the dimm of interest */
		if (nd_mapping->nvdimm != nvdimm)
			continue;

		n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
		if (n == 0)
			return 0;
		rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
		dev_WARN_ONCE(&nd_region->dev, rem,
				"pmem reserve underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		/* a dimm appears at most once per region; done either way */
		return rem ? -ENXIO : 0;
	}

	return 0;
}
|
|
|
|
|
2018-07-25 05:07:57 +08:00
|
|
|
void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
|
2015-05-02 01:34:01 +08:00
|
|
|
struct nd_mapping *nd_mapping)
|
|
|
|
{
|
|
|
|
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
|
|
|
|
struct resource *res, *_res;
|
|
|
|
|
|
|
|
for_each_dpa_resource_safe(ndd, res, _res)
|
|
|
|
if (strcmp(res->name, "pmem-reserve") == 0)
|
|
|
|
nvdimm_free_dpa(ndd, res);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
|
|
|
|
struct nd_mapping *nd_mapping)
|
|
|
|
{
|
|
|
|
struct nvdimm *nvdimm = nd_mapping->nvdimm;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
|
|
|
|
__reserve_free_pmem);
|
|
|
|
if (rc)
|
|
|
|
release_free_pmem(nvdimm_bus, nd_mapping);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2015-06-18 05:14:46 +08:00
|
|
|
/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered.  For BLK regions, first consume
 * BLK-only available DPA free space, then consume PMEM-aliased DPA
 * space starting at the highest DPA.  For PMEM regions start
 * allocations from the start of an interleave set and end at the first
 * BLK allocation or the end of the interleave set, whichever comes
 * first.
 *
 * Return: 0 on success, -ENXIO on allocation underrun, or the error
 * from reserving/merging dpa resources.
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t rem = n;
		int rc, j;

		/*
		 * In the BLK case try once with all unallocated PMEM
		 * reserved, and once without
		 */
		for (j = is_pmem; j < 2; j++) {
			bool blk_only = j == 0;

			if (blk_only) {
				rc = reserve_free_pmem(nvdimm_bus, nd_mapping);
				if (rc)
					return rc;
			}
			rem = scan_allocate(nd_region, nd_mapping,
					label_id, rem);
			if (blk_only)
				release_free_pmem(nvdimm_bus, nd_mapping);

			/* try again and allow encroachments into PMEM */
			if (rem == 0)
				break;
		}

		dev_WARN_ONCE(&nd_region->dev, rem,
				"allocation underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		if (rem)
			return -ENXIO;

		rc = merge_dpa(nd_region, nd_mapping, label_id);
		if (rc)
			return rc;
	}

	return 0;
}
|
|
|
|
|
2016-10-07 14:13:15 +08:00
|
|
|
/*
 * Recompute the namespace's system-physical-address resource from its
 * per-dimm dpa allocation: spa offset = dpa offset within the mapping
 * scaled by the interleave width.  A missing uuid, missing dimm data,
 * or missing allocation degrades @size to 0 rather than publishing a
 * bogus range.
 */
static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	struct resource *res = &nspm->nsio.res;
	resource_size_t offset = 0;

	if (size && !nspm->uuid) {
		WARN_ON_ONCE(1);
		size = 0;
	}

	if (size && nspm->uuid) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_id label_id;
		struct resource *res; /* NOTE: intentionally shadows outer res */

		if (!ndd) {
			size = 0;
			goto out;
		}

		nd_label_gen_id(&label_id, nspm->uuid, 0);

		/* calculate a spa offset from the dpa allocation offset */
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0) {
				offset = (res->start - nd_mapping->start)
					* nd_region->ndr_mappings;
				goto out;
			}

		/* uuid set but no matching dpa allocation found */
		WARN_ON_ONCE(1);
		size = 0;
	}

 out:
	res->start = nd_region->ndr_start + offset;
	res->end = res->start + size - 1;
}
|
|
|
|
|
2015-12-02 05:48:12 +08:00
|
|
|
static bool uuid_not_set(const u8 *uuid, struct device *dev, const char *where)
|
|
|
|
{
|
|
|
|
if (!uuid) {
|
|
|
|
dev_dbg(dev, "%s: uuid not set\n", where);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2015-06-18 05:14:46 +08:00
|
|
|
/*
 * Resize a pmem/blk namespace to @val bytes total (spread evenly across
 * the region's mappings).  Caller holds the namespace/bus locks.  A
 * size of zero deletes the allocation and, when permitted, the
 * namespace device itself.
 */
static ssize_t __size_store(struct device *dev, unsigned long long val)
{
	resource_size_t allocated = 0, available = 0;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm_drvdata *ndd;
	struct nd_label_id label_id;
	u32 flags = 0, remainder;
	int rc, i, id = -1;
	u8 *uuid = NULL;

	/* no resizing while the namespace is active or claimed */
	if (dev->driver || ndns->claim)
		return -EBUSY;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
		id = nspm->id;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
		id = nsblk->id;
	}

	/*
	 * We need a uuid for the allocation-label and dimm(s) on which
	 * to store the label.
	 */
	if (uuid_not_set(uuid, dev, __func__))
		return -ENXIO;
	if (nd_region->ndr_mappings == 0) {
		dev_dbg(dev, "not associated with dimm(s)\n");
		return -ENXIO;
	}

	/* total size must divide evenly into page-sized per-dimm chunks */
	div_u64_rem(val, PAGE_SIZE * nd_region->ndr_mappings, &remainder);
	if (remainder) {
		dev_dbg(dev, "%llu is not %ldK aligned\n", val,
				(PAGE_SIZE * nd_region->ndr_mappings) / SZ_1K);
		return -EINVAL;
	}

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		ndd = to_ndd(nd_mapping);

		/*
		 * All dimms in an interleave set, or the base dimm for a blk
		 * region, need to be enabled for the size to be changed.
		 */
		if (!ndd)
			return -ENXIO;

		allocated += nvdimm_allocated_dpa(ndd, &label_id);
	}
	available = nd_region_allocatable_dpa(nd_region);

	if (val > available + allocated)
		return -ENOSPC;

	if (val == allocated)
		return 0;

	/* convert totals to per-mapping quantities before (de)allocating */
	val = div_u64(val, nd_region->ndr_mappings);
	allocated = div_u64(allocated, nd_region->ndr_mappings);
	if (val < allocated)
		rc = shrink_dpa_allocation(nd_region, &label_id,
				allocated - val);
	else
		rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);

	if (rc)
		return rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		nd_namespace_pmem_set_resource(nd_region, nspm,
				val * nd_region->ndr_mappings);
	}

	/*
	 * Try to delete the namespace if we deleted all of its
	 * allocation, this is not the seed or 0th device for the
	 * region, and it is not actively claimed by a btt, pfn, or dax
	 * instance.
	 */
	if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
		nd_device_unregister(dev, ND_ASYNC);

	return rc;
}
|
|
|
|
|
|
|
|
/*
 * sysfs "size" write handler: parse the requested byte count, resize
 * under the device and bus locks, and push updated labels to the
 * dimms.  Writing zero deletes the namespace's uuid as well.
 */
static ssize_t size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	unsigned long long val;
	u8 **uuid = NULL;
	int rc;

	rc = kstrtoull(buf, 0, &val);
	if (rc)
		return rc;

	/* lock order: device lock, then bus lock */
	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __size_store(dev, val);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = &nsblk->uuid;
	}

	if (rc == 0 && val == 0 && uuid) {
		/* setting size zero == 'delete namespace' */
		kfree(*uuid);
		*uuid = NULL;
	}

	dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);

	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc < 0 ? rc : len;
}
|
|
|
|
|
2015-06-25 16:20:04 +08:00
|
|
|
resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
|
2015-06-18 05:14:46 +08:00
|
|
|
{
|
2015-06-25 16:20:04 +08:00
|
|
|
struct device *dev = &ndns->dev;
|
2015-05-02 01:34:01 +08:00
|
|
|
|
2015-06-18 05:14:46 +08:00
|
|
|
if (is_namespace_pmem(dev)) {
|
|
|
|
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
|
|
|
|
|
2015-06-25 16:20:04 +08:00
|
|
|
return resource_size(&nspm->nsio.res);
|
2015-06-18 05:14:46 +08:00
|
|
|
} else if (is_namespace_blk(dev)) {
|
2015-06-25 16:20:04 +08:00
|
|
|
return nd_namespace_blk_size(to_nd_namespace_blk(dev));
|
2015-06-18 05:14:46 +08:00
|
|
|
} else if (is_namespace_io(dev)) {
|
|
|
|
struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
|
|
|
|
|
2015-06-25 16:20:04 +08:00
|
|
|
return resource_size(&nsio->res);
|
|
|
|
} else
|
|
|
|
WARN_ONCE(1, "unknown namespace type\n");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Capacity query safe against concurrent resize: takes the nvdimm bus
 * lock around the raw __nvdimm_namespace_capacity() read.
 */
resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	resource_size_t size;

	nvdimm_bus_lock(&ndns->dev);
	size = __nvdimm_namespace_capacity(ndns);
	nvdimm_bus_unlock(&ndns->dev);

	return size;
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);
|
|
|
|
|
2018-06-14 00:08:36 +08:00
|
|
|
bool nvdimm_namespace_locked(struct nd_namespace_common *ndns)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
bool locked = false;
|
|
|
|
struct device *dev = &ndns->dev;
|
|
|
|
struct nd_region *nd_region = to_nd_region(dev->parent);
|
|
|
|
|
|
|
|
for (i = 0; i < nd_region->ndr_mappings; i++) {
|
|
|
|
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
|
|
|
|
struct nvdimm *nvdimm = nd_mapping->nvdimm;
|
|
|
|
|
|
|
|
if (test_bit(NDD_LOCKED, &nvdimm->flags)) {
|
|
|
|
dev_dbg(dev, "%s locked\n", nvdimm_name(nvdimm));
|
|
|
|
locked = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return locked;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(nvdimm_namespace_locked);
|
|
|
|
|
2015-06-25 16:20:04 +08:00
|
|
|
/* sysfs "size" read handler: current capacity in bytes. */
static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)
			nvdimm_namespace_capacity(to_ndns(dev)));
}
/*
 * Registered read-only (0444) despite having a store op; presumably
 * write permission is granted elsewhere per namespace type — TODO
 * confirm against the attribute-group is_visible logic.
 */
static DEVICE_ATTR(size, 0444, size_show, size_store);
|
2015-06-18 05:14:46 +08:00
|
|
|
|
2016-09-22 09:16:21 +08:00
|
|
|
static u8 *namespace_to_uuid(struct device *dev)
|
2015-06-18 05:14:46 +08:00
|
|
|
{
|
|
|
|
if (is_namespace_pmem(dev)) {
|
|
|
|
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
|
|
|
|
|
2016-09-22 09:16:21 +08:00
|
|
|
return nspm->uuid;
|
2015-06-18 05:14:46 +08:00
|
|
|
} else if (is_namespace_blk(dev)) {
|
2015-05-02 01:34:01 +08:00
|
|
|
struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
|
|
|
|
|
2016-09-22 09:16:21 +08:00
|
|
|
return nsblk->uuid;
|
2015-06-18 05:14:46 +08:00
|
|
|
} else
|
2016-09-22 09:16:21 +08:00
|
|
|
return ERR_PTR(-ENXIO);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t uuid_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
u8 *uuid = namespace_to_uuid(dev);
|
2015-06-18 05:14:46 +08:00
|
|
|
|
2016-09-22 09:16:21 +08:00
|
|
|
if (IS_ERR(uuid))
|
|
|
|
return PTR_ERR(uuid);
|
2015-06-18 05:14:46 +08:00
|
|
|
if (uuid)
|
|
|
|
return sprintf(buf, "%pUb\n", uuid);
|
|
|
|
return sprintf(buf, "\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 * @nd_region: parent region so we can updates all dimms in the set
 * @dev: namespace type for generating label_id
 * @new_uuid: incoming uuid
 * @old_uuid: reference to the uuid storage location in the namespace object
 *
 * Takes ownership of @new_uuid on success (it is stored into
 * @old_uuid; the previous uuid is freed).  Returns 0, -EINVAL when the
 * uuid is not unique, or -EBUSY when labels for the old uuid have
 * already been written.
 */
static int namespace_update_uuid(struct nd_region *nd_region,
		struct device *dev, u8 *new_uuid, u8 **old_uuid)
{
	u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
	struct nd_label_id old_label_id;
	struct nd_label_id new_label_id;
	int i;

	if (!nd_is_uuid_unique(dev, new_uuid))
		return -EINVAL;

	/* nothing to rename: no uuid has been assigned yet */
	if (*old_uuid == NULL)
		goto out;

	/*
	 * If we've already written a label with this uuid, then it's
	 * too late to rename because we can't reliably update the uuid
	 * without losing the old namespace. Userspace must delete this
	 * namespace to abandon the old uuid.
	 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		/*
		 * This check by itself is sufficient because old_uuid
		 * would be NULL above if this uuid did not exist in the
		 * currently written set.
		 *
		 * FIXME: can we delete uuid with zero dpa allocated?
		 */
		if (list_empty(&nd_mapping->labels))
			return -EBUSY;
	}

	nd_label_gen_id(&old_label_id, *old_uuid, flags);
	nd_label_gen_id(&new_label_id, new_uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_ent *label_ent;
		struct resource *res;

		/* rename dpa resources in place (same-length id strings) */
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, old_label_id.id) == 0)
				sprintf((void *) res->name, "%s",
						new_label_id.id);

		/* flag stale on-dimm labels for reaping at next update */
		mutex_lock(&nd_mapping->lock);
		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
			struct nd_namespace_label *nd_label = label_ent->label;
			struct nd_label_id label_id;

			if (!nd_label)
				continue;
			nd_label_gen_id(&label_id, nd_label->uuid,
					__le32_to_cpu(nd_label->flags));
			if (strcmp(old_label_id.id, label_id.id) == 0)
				set_bit(ND_LABEL_REAP, &label_ent->flags);
		}
		mutex_unlock(&nd_mapping->lock);
	}
	kfree(*old_uuid);
 out:
	*old_uuid = new_uuid;
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * sysfs "uuid" write handler: parse the incoming uuid, rename the
 * namespace's dpa allocations, and write updated labels — all under
 * the device and bus locks.  Rejected while the namespace is claimed.
 */
static ssize_t uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	u8 *uuid = NULL;
	ssize_t rc = 0;
	u8 **ns_uuid;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_uuid = &nsblk->uuid;
	} else
		return -ENXIO;

	/* lock order: device lock, then bus lock */
	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	/* each step runs only while rc is still non-negative */
	if (rc >= 0)
		rc = nd_uuid_store(dev, &uuid, buf, len);
	if (rc >= 0)
		rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	else
		kfree(uuid); /* namespace_update_uuid did not take ownership */
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc < 0 ? rc : len;
}
static DEVICE_ATTR_RW(uuid);
|
|
|
|
|
|
|
|
static ssize_t resource_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct resource *res;
|
|
|
|
|
|
|
|
if (is_namespace_pmem(dev)) {
|
|
|
|
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
|
|
|
|
|
|
|
|
res = &nspm->nsio.res;
|
|
|
|
} else if (is_namespace_io(dev)) {
|
|
|
|
struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
|
|
|
|
|
|
|
|
res = &nsio->res;
|
|
|
|
} else
|
|
|
|
return -ENXIO;
|
|
|
|
|
|
|
|
/* no address to convey if the namespace has no allocation */
|
|
|
|
if (resource_size(res) == 0)
|
|
|
|
return -ENXIO;
|
|
|
|
return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RO(resource);
|
|
|
|
|
2017-06-04 11:12:07 +08:00
|
|
|
/*
 * Logical block sizes (bytes) selectable for blk-mode namespaces; the
 * 520/528 and 4104/4160/4224 entries are 512/4096 payloads plus
 * per-sector metadata.  Zero-terminated for the nd_size_select helpers.
 */
static const unsigned long blk_lbasize_supported[] = { 512, 520, 528,
	4096, 4104, 4160, 4224, 0 };

/* Logical block sizes selectable for pmem namespaces (zero-terminated) */
static const unsigned long pmem_lbasize_supported[] = { 512, 4096, 0 };
|
|
|
|
|
2015-05-02 01:34:01 +08:00
|
|
|
static ssize_t sector_size_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
2017-06-04 11:12:07 +08:00
|
|
|
if (is_namespace_blk(dev)) {
|
|
|
|
struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
|
2015-05-02 01:34:01 +08:00
|
|
|
|
2017-08-12 08:36:54 +08:00
|
|
|
return nd_size_select_show(nsblk->lbasize,
|
2017-06-04 11:12:07 +08:00
|
|
|
blk_lbasize_supported, buf);
|
|
|
|
}
|
2015-05-02 01:34:01 +08:00
|
|
|
|
2017-06-04 11:12:07 +08:00
|
|
|
if (is_namespace_pmem(dev)) {
|
|
|
|
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
|
|
|
|
|
2017-08-12 08:36:54 +08:00
|
|
|
return nd_size_select_show(nspm->lbasize,
|
2017-06-04 11:12:07 +08:00
|
|
|
pmem_lbasize_supported, buf);
|
|
|
|
}
|
|
|
|
return -ENXIO;
|
2015-05-02 01:34:01 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * sysfs "sector_size" writer: validate the requested logical block size
 * against the personality's supported table, record it, and rewrite the
 * namespace labels.  Refused (-EBUSY) while the namespace is claimed.
 */
static ssize_t sector_size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	const unsigned long *supported;
	unsigned long *lbasize;
	ssize_t rc = 0;

	/* pick the lbasize slot and valid-size table per personality */
	if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		lbasize = &nsblk->lbasize;
		supported = blk_lbasize_supported;
	} else if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		lbasize = &nspm->lbasize;
		supported = pmem_lbasize_supported;
	} else
		return -ENXIO;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_size_select_store(dev, buf, lbasize, supported);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
			buf, buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(sector_size);
|
|
|
|
|
2015-05-31 00:35:36 +08:00
|
|
|
/*
 * sysfs "dpa_extents": count how many DPA resources across the region's
 * mappings are tagged with this namespace's label-id.  Reports 0 when the
 * namespace has no uuid yet.
 */
static ssize_t dpa_extents_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_label_id label_id;
	int count = 0, i;
	u8 *uuid = NULL;
	u32 flags = 0;

	nvdimm_bus_lock(dev);
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
		flags = 0;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
	}

	/* uuid may legitimately be unset for a freshly created seed */
	if (!uuid)
		goto out;

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;

		/* dpa resources are named by label-id; match by name */
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0)
				count++;
	}
 out:
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(dpa_extents);
|
|
|
|
|
2017-06-29 04:25:00 +08:00
|
|
|
/*
 * btt_claim_class() - choose between BTT1.1 and BTT2.0 claim classes.
 *
 * Inspects the current namespace-label index block on each mapping of the
 * parent region and returns NVDIMM_CCLASS_BTT (v1.1), NVDIMM_CCLASS_BTT2
 * (v2.0), or -ENXIO when the mappings disagree about their label format.
 */
static int btt_claim_class(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int i, loop_bitmask = 0;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_namespace_index *nsindex;

		/*
		 * If any of the DIMMs do not support labels the only
		 * possible BTT format is v1.
		 */
		if (!ndd) {
			loop_bitmask = 0;
			break;
		}

		nsindex = to_namespace_index(ndd, ndd->ns_current);
		if (nsindex == NULL)
			loop_bitmask |= 1;
		else {
			/* check whether existing labels are v1.1 or v1.2 */
			if (__le16_to_cpu(nsindex->major) == 1
					&& __le16_to_cpu(nsindex->minor) == 1)
				loop_bitmask |= 2;
			else
				loop_bitmask |= 4;
		}
	}
	/*
	 * If nsindex is null loop_bitmask's bit 0 will be set, and if an index
	 * block is found, a v1.1 label for any mapping will set bit 1, and a
	 * v1.2 label will set bit 2.
	 *
	 * At the end of the loop, at most one of the three bits must be set.
	 * If multiple bits were set, it means the different mappings disagree
	 * about their labels, and this must be cleaned up first.
	 *
	 * If all the label index blocks are found to agree, nsindex of NULL
	 * implies labels haven't been initialized yet, and when they will,
	 * they will be of the 1.2 format, so we can assume BTT2.0
	 *
	 * If 1.1 labels are found, we enforce BTT1.1, and if 1.2 labels are
	 * found, we enforce BTT2.0
	 *
	 * If the loop was never entered, default to BTT1.1 (legacy namespaces)
	 */
	switch (loop_bitmask) {
	case 0:
	case 2:
		return NVDIMM_CCLASS_BTT;
	case 1:
	case 4:
		return NVDIMM_CCLASS_BTT2;
	default:
		return -ENXIO;
	}
}
|
|
|
|
|
2015-06-25 16:20:04 +08:00
|
|
|
static ssize_t holder_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct nd_namespace_common *ndns = to_ndns(dev);
|
|
|
|
ssize_t rc;
|
|
|
|
|
2019-07-18 09:08:26 +08:00
|
|
|
nd_device_lock(dev);
|
2015-06-25 16:20:04 +08:00
|
|
|
rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
|
2019-07-18 09:08:26 +08:00
|
|
|
nd_device_unlock(dev);
|
2015-06-25 16:20:04 +08:00
|
|
|
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RO(holder);
|
|
|
|
|
2017-06-04 09:18:39 +08:00
|
|
|
static ssize_t __holder_class_store(struct device *dev, const char *buf)
|
|
|
|
{
|
|
|
|
struct nd_namespace_common *ndns = to_ndns(dev);
|
|
|
|
|
|
|
|
if (dev->driver || ndns->claim)
|
|
|
|
return -EBUSY;
|
|
|
|
|
2019-03-05 04:14:04 +08:00
|
|
|
if (sysfs_streq(buf, "btt"))
|
2017-06-29 04:25:00 +08:00
|
|
|
ndns->claim_class = btt_claim_class(dev);
|
2019-03-05 04:14:04 +08:00
|
|
|
else if (sysfs_streq(buf, "pfn"))
|
2017-06-04 09:18:39 +08:00
|
|
|
ndns->claim_class = NVDIMM_CCLASS_PFN;
|
2019-03-05 04:14:04 +08:00
|
|
|
else if (sysfs_streq(buf, "dax"))
|
2017-06-04 09:18:39 +08:00
|
|
|
ndns->claim_class = NVDIMM_CCLASS_DAX;
|
2019-03-05 04:14:04 +08:00
|
|
|
else if (sysfs_streq(buf, ""))
|
2017-06-04 09:18:39 +08:00
|
|
|
ndns->claim_class = NVDIMM_CCLASS_NONE;
|
|
|
|
else
|
|
|
|
return -EINVAL;
|
|
|
|
|
2017-06-29 04:25:00 +08:00
|
|
|
/* btt_claim_class() could've returned an error */
|
|
|
|
if (ndns->claim_class < 0)
|
|
|
|
return ndns->claim_class;
|
|
|
|
|
2017-06-04 09:18:39 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * sysfs "holder_class" writer: update the namespace's claim class and
 * rewrite its labels, serialized under the device and bus locks with
 * probing quiesced.
 */
static ssize_t holder_class_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __holder_class_store(dev, buf);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc < 0 ? rc : len;
}
|
|
|
|
|
|
|
|
static ssize_t holder_class_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct nd_namespace_common *ndns = to_ndns(dev);
|
|
|
|
ssize_t rc;
|
|
|
|
|
2019-07-18 09:08:26 +08:00
|
|
|
nd_device_lock(dev);
|
2017-06-04 09:18:39 +08:00
|
|
|
if (ndns->claim_class == NVDIMM_CCLASS_NONE)
|
|
|
|
rc = sprintf(buf, "\n");
|
2017-06-29 04:25:00 +08:00
|
|
|
else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) ||
|
|
|
|
(ndns->claim_class == NVDIMM_CCLASS_BTT2))
|
2017-06-04 09:18:39 +08:00
|
|
|
rc = sprintf(buf, "btt\n");
|
|
|
|
else if (ndns->claim_class == NVDIMM_CCLASS_PFN)
|
|
|
|
rc = sprintf(buf, "pfn\n");
|
|
|
|
else if (ndns->claim_class == NVDIMM_CCLASS_DAX)
|
|
|
|
rc = sprintf(buf, "dax\n");
|
|
|
|
else
|
|
|
|
rc = sprintf(buf, "<unknown>\n");
|
2019-07-18 09:08:26 +08:00
|
|
|
nd_device_unlock(dev);
|
2017-06-04 09:18:39 +08:00
|
|
|
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RW(holder_class);
|
|
|
|
|
2015-12-15 07:34:15 +08:00
|
|
|
static ssize_t mode_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct nd_namespace_common *ndns = to_ndns(dev);
|
|
|
|
struct device *claim;
|
|
|
|
char *mode;
|
|
|
|
ssize_t rc;
|
|
|
|
|
2019-07-18 09:08:26 +08:00
|
|
|
nd_device_lock(dev);
|
2015-12-15 07:34:15 +08:00
|
|
|
claim = ndns->claim;
|
2016-01-24 07:34:10 +08:00
|
|
|
if (claim && is_nd_btt(claim))
|
2015-12-15 07:34:15 +08:00
|
|
|
mode = "safe";
|
2016-01-24 07:34:10 +08:00
|
|
|
else if (claim && is_nd_pfn(claim))
|
|
|
|
mode = "memory";
|
2016-03-12 02:15:36 +08:00
|
|
|
else if (claim && is_nd_dax(claim))
|
|
|
|
mode = "dax";
|
2016-01-24 07:34:10 +08:00
|
|
|
else if (!claim && pmem_should_map_pages(dev))
|
|
|
|
mode = "memory";
|
2015-12-15 07:34:15 +08:00
|
|
|
else
|
|
|
|
mode = "raw";
|
|
|
|
rc = sprintf(buf, "%s\n", mode);
|
2019-07-18 09:08:26 +08:00
|
|
|
nd_device_unlock(dev);
|
2015-12-15 07:34:15 +08:00
|
|
|
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RO(mode);
|
|
|
|
|
2015-06-25 16:20:04 +08:00
|
|
|
/*
 * sysfs "force_raw" writer: accept a boolean and record whether the
 * namespace should be bound in raw mode regardless of personality.
 */
static ssize_t force_raw_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool val;
	int err = strtobool(buf, &val);

	if (err)
		return err;

	to_ndns(dev)->force_raw = val;
	return len;
}
|
|
|
|
|
|
|
|
static ssize_t force_raw_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RW(force_raw);
|
|
|
|
|
2015-06-01 03:02:11 +08:00
|
|
|
/*
 * Full attribute set for namespace devices; per-device visibility is
 * decided by namespace_visible() below.
 */
static struct attribute *nd_namespace_attributes[] = {
	&dev_attr_nstype.attr,
	&dev_attr_size.attr,
	&dev_attr_mode.attr,
	&dev_attr_uuid.attr,
	&dev_attr_holder.attr,
	&dev_attr_resource.attr,
	&dev_attr_alt_name.attr,
	&dev_attr_force_raw.attr,
	&dev_attr_sector_size.attr,
	&dev_attr_dpa_extents.attr,
	&dev_attr_holder_class.attr,
	NULL,
};
|
|
|
|
|
2015-06-18 05:14:46 +08:00
|
|
|
/*
 * namespace_visible() - per-attribute, per-device sysfs visibility.
 *
 * "resource" is hidden for blk namespaces and restricted to root-read
 * (0400) otherwise.  pmem/blk namespaces expose everything (with "size"
 * forced writable); io namespaces only expose the read-mostly subset.
 */
static umode_t namespace_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (a == &dev_attr_resource.attr) {
		if (is_namespace_blk(dev))
			return 0;
		/* root-only: physical addresses are sensitive */
		return 0400;
	}

	if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
		if (a == &dev_attr_size.attr)
			return 0644;

		return a->mode;
	}

	/* io namespaces: fixed provisioning, expose only these */
	if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
			|| a == &dev_attr_holder.attr
			|| a == &dev_attr_holder_class.attr
			|| a == &dev_attr_force_raw.attr
			|| a == &dev_attr_mode.attr)
		return a->mode;

	return 0;
}
|
|
|
|
|
2015-06-01 03:02:11 +08:00
|
|
|
/* Attribute group tying the namespace attributes to their visibility hook */
static struct attribute_group nd_namespace_attribute_group = {
	.attrs = nd_namespace_attributes,
	.is_visible = namespace_visible,
};
|
|
|
|
|
|
|
|
/* Groups installed on every namespace device (core + namespace + numa) */
static const struct attribute_group *nd_namespace_attribute_groups[] = {
	&nd_device_attribute_group,
	&nd_namespace_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};
|
|
|
|
|
2015-06-25 16:20:04 +08:00
|
|
|
/**
 * nvdimm_namespace_common_probe - resolve and validate a namespace for @dev
 * @dev: either a namespace device itself, or a btt/pfn/dax claimer
 *
 * When @dev is a claimer, resolves its backing namespace and verifies the
 * claim relationship; when @dev is a namespace, verifies it is unclaimed.
 * Then validates lock state, minimum capacity, and personality-specific
 * configuration (uuid, and for blk the sector size and resource layout).
 *
 * Returns the validated namespace or an ERR_PTR (-ENODEV, -EBUSY,
 * -EACCES, -ENXIO).
 */
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
	struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
	struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
	struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
	struct nd_namespace_common *ndns = NULL;
	resource_size_t size;

	if (nd_btt || nd_pfn || nd_dax) {
		if (nd_btt)
			ndns = nd_btt->ndns;
		else if (nd_pfn)
			ndns = nd_pfn->ndns;
		else if (nd_dax)
			ndns = nd_dax->nd_pfn.ndns;

		if (!ndns)
			return ERR_PTR(-ENODEV);

		/*
		 * Flush any in-progess probes / removals in the driver
		 * for the raw personality of this namespace.
		 */
		nd_device_lock(&ndns->dev);
		nd_device_unlock(&ndns->dev);
		if (ndns->dev.driver) {
			dev_dbg(&ndns->dev, "is active, can't bind %s\n",
					dev_name(dev));
			return ERR_PTR(-EBUSY);
		}
		if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev,
					"host (%s) vs claim (%s) mismatch\n",
					dev_name(dev),
					dev_name(ndns->claim)))
			return ERR_PTR(-ENXIO);
	} else {
		ndns = to_ndns(dev);
		if (ndns->claim) {
			dev_dbg(dev, "claimed by %s, failing probe\n",
				dev_name(ndns->claim));

			return ERR_PTR(-ENXIO);
		}
	}

	/* security-locked dimms make the namespace inaccessible */
	if (nvdimm_namespace_locked(ndns))
		return ERR_PTR(-EACCES);

	size = nvdimm_namespace_capacity(ndns);
	if (size < ND_MIN_NAMESPACE_SIZE) {
		dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
				&size, ND_MIN_NAMESPACE_SIZE);
		return ERR_PTR(-ENODEV);
	}

	/* personality-specific configuration checks */
	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;

		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
	} else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
		if (!nsblk->lbasize) {
			dev_dbg(&ndns->dev, "sector size not set\n");
			return ERR_PTR(-ENODEV);
		}
		if (!nd_namespace_blk_validate(nsblk))
			return ERR_PTR(-ENODEV);
	}

	return ndns;
}
EXPORT_SYMBOL(nvdimm_namespace_common_probe);
|
|
|
|
|
2015-06-01 03:02:11 +08:00
|
|
|
static struct device **create_namespace_io(struct nd_region *nd_region)
|
|
|
|
{
|
|
|
|
struct nd_namespace_io *nsio;
|
|
|
|
struct device *dev, **devs;
|
|
|
|
struct resource *res;
|
|
|
|
|
|
|
|
nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
|
|
|
|
if (!nsio)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
|
|
|
|
if (!devs) {
|
|
|
|
kfree(nsio);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2015-06-25 16:20:04 +08:00
|
|
|
dev = &nsio->common.dev;
|
2015-06-01 03:02:11 +08:00
|
|
|
dev->type = &namespace_io_device_type;
|
|
|
|
dev->parent = &nd_region->dev;
|
|
|
|
res = &nsio->res;
|
|
|
|
res->name = dev_name(&nd_region->dev);
|
|
|
|
res->flags = IORESOURCE_MEM;
|
|
|
|
res->start = nd_region->ndr_start;
|
|
|
|
res->end = res->start + nd_region->ndr_size - 1;
|
|
|
|
|
|
|
|
devs[0] = dev;
|
|
|
|
return devs;
|
|
|
|
}
|
|
|
|
|
2015-06-18 05:14:46 +08:00
|
|
|
/*
 * has_uuid_at_pos() - check every mapping for a label matching @uuid,
 * @cookie, and interleave position @pos.
 *
 * Returns true if some mapping carries such a label; returns false early
 * if any single mapping holds two labels with the same uuid (a corrupt /
 * ambiguous label set).
 */
static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
		u64 cookie, u16 pos)
{
	struct nd_namespace_label *found = NULL;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nd_interleave_set *nd_set = nd_region->nd_set;
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_ent *label_ent;
		bool found_uuid = false;

		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
			struct nd_namespace_label *nd_label = label_ent->label;
			u16 position, nlabel;
			u64 isetcookie;

			if (!nd_label)
				continue;
			isetcookie = __le64_to_cpu(nd_label->isetcookie);
			position = __le16_to_cpu(nd_label->position);
			nlabel = __le16_to_cpu(nd_label->nlabel);

			if (isetcookie != cookie)
				continue;

			if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
				continue;

			/* newer label formats also pin the region type */
			if (namespace_label_has(ndd, type_guid)
					&& !guid_equal(&nd_set->type_guid,
						&nd_label->type_guid)) {
				dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
						&nd_set->type_guid,
						&nd_label->type_guid);
				continue;
			}

			/* two labels with one uuid on a dimm is invalid */
			if (found_uuid) {
				dev_dbg(ndd->dev, "duplicate entry for uuid\n");
				return false;
			}
			found_uuid = true;
			if (nlabel != nd_region->ndr_mappings)
				continue;
			if (position != pos)
				continue;
			found = nd_label;
			break;
		}
		if (found)
			break;
	}
	return found != NULL;
}
|
|
|
|
|
|
|
|
/*
 * select_pmem_id() - promote the label matching @pmem_id on each mapping.
 *
 * For every mapping, finds the label whose uuid equals @pmem_id, checks
 * that its dpa range sits inside the mapping's published range, and moves
 * it to the head of the mapping's label list.  Caller must hold each
 * nd_mapping->lock.  Returns 0, or -ENODEV/-EINVAL on failure.
 */
static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
{
	int i;

	if (!pmem_id)
		return -ENODEV;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_namespace_label *nd_label = NULL;
		u64 hw_start, hw_end, pmem_start, pmem_end;
		struct nd_label_ent *label_ent;

		lockdep_assert_held(&nd_mapping->lock);
		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
			nd_label = label_ent->label;
			if (!nd_label)
				continue;
			if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0)
				break;
			nd_label = NULL;
		}

		/* callers pre-validated presence via has_uuid_at_pos() */
		if (!nd_label) {
			WARN_ON(1);
			return -EINVAL;
		}

		/*
		 * Check that this label is compliant with the dpa
		 * range published in NFIT
		 */
		hw_start = nd_mapping->start;
		hw_end = hw_start + nd_mapping->size;
		pmem_start = __le64_to_cpu(nd_label->dpa);
		pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize);
		if (pmem_start >= hw_start && pmem_start < hw_end
				&& pmem_end <= hw_end && pmem_end > hw_start)
			/* pass */;
		else {
			dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
					dev_name(ndd->dev), nd_label->uuid);
			return -EINVAL;
		}

		/* move recently validated label to the front of the list */
		list_move(&label_ent->list, &nd_mapping->labels);
	}
	return 0;
}
|
|
|
|
|
|
|
|
/**
 * create_namespace_pmem - validate interleave set labelling, retrieve label0
 * @nd_region: region with mappings to validate
 * @nsindex: namespace label index block used to derive the set cookie
 * @nd_label: target pmem namespace label to evaluate
 *
 * Returns the newly allocated namespace device on success, or an ERR_PTR:
 * -EAGAIN when @nd_label belongs to a different interleave-set config,
 * -EINVAL/-ENODEV on inconsistent labels, -ENOMEM on allocation failure.
 */
static struct device *create_namespace_pmem(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex,
		struct nd_namespace_label *nd_label)
{
	u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
	struct nd_label_ent *label_ent;
	struct nd_namespace_pmem *nspm;
	struct nd_mapping *nd_mapping;
	resource_size_t size = 0;
	struct resource *res;
	struct device *dev;
	int rc = 0;
	u16 i;

	if (cookie == 0) {
		dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n");
		return ERR_PTR(-ENXIO);
	}

	/* accept the alternate cookie for labels written by older configs */
	if (__le64_to_cpu(nd_label->isetcookie) != cookie) {
		dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
				nd_label->uuid);
		if (__le64_to_cpu(nd_label->isetcookie) != altcookie)
			return ERR_PTR(-EAGAIN);

		dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
				nd_label->uuid);
	}

	nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
	if (!nspm)
		return ERR_PTR(-ENOMEM);

	/* id < 0 tells namespace_pmem_release() no ida entry to drop yet */
	nspm->id = -1;
	dev = &nspm->nsio.common.dev;
	dev->type = &namespace_pmem_device_type;
	dev->parent = &nd_region->dev;
	res = &nspm->nsio.res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;

	/* require a matching label at every interleave position */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		if (has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i))
			continue;
		if (has_uuid_at_pos(nd_region, nd_label->uuid, altcookie, i))
			continue;
		break;
	}

	if (i < nd_region->ndr_mappings) {
		struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;

		/*
		 * Give up if we don't find an instance of a uuid at each
		 * position (from 0 to nd_region->ndr_mappings - 1), or if we
		 * find a dimm with two instances of the same uuid.
		 */
		dev_err(&nd_region->dev, "%s missing label for %pUb\n",
				nvdimm_name(nvdimm), nd_label->uuid);
		rc = -EINVAL;
		goto err;
	}

	/*
	 * Fix up each mapping's 'labels' to have the validated pmem label for
	 * that position at labels[0], and NULL at labels[1]. In the process,
	 * check that the namespace aligns with interleave-set. We know
	 * that it does not overlap with any blk namespaces by virtue of
	 * the dimm being enabled (i.e. nd_label_reserve_dpa()
	 * succeeded).
	 */
	rc = select_pmem_id(nd_region, nd_label->uuid);
	if (rc)
		goto err;

	/* Calculate total size and populate namespace properties from label0 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_namespace_label *label0;
		struct nvdimm_drvdata *ndd;

		nd_mapping = &nd_region->mapping[i];
		label_ent = list_first_entry_or_null(&nd_mapping->labels,
				typeof(*label_ent), list);
		label0 = label_ent ? label_ent->label : NULL;

		if (!label0) {
			WARN_ON(1);
			continue;
		}

		size += __le64_to_cpu(label0->rawsize);
		/* name/uuid/lbasize come from the position-0 label only */
		if (__le16_to_cpu(label0->position) != 0)
			continue;
		WARN_ON(nspm->alt_name || nspm->uuid);
		nspm->alt_name = kmemdup((void __force *) label0->name,
				NSLABEL_NAME_LEN, GFP_KERNEL);
		nspm->uuid = kmemdup((void __force *) label0->uuid,
				NSLABEL_UUID_LEN, GFP_KERNEL);
		nspm->lbasize = __le64_to_cpu(label0->lbasize);
		ndd = to_ndd(nd_mapping);
		if (namespace_label_has(ndd, abstraction_guid))
			nspm->nsio.common.claim_class
				= to_nvdimm_cclass(&label0->abstraction_guid);

	}

	if (!nspm->alt_name || !nspm->uuid) {
		rc = -ENOMEM;
		goto err;
	}

	nd_namespace_pmem_set_resource(nd_region, nspm, size);

	return dev;
 err:
	namespace_pmem_release(dev);
	switch (rc) {
	case -EINVAL:
		dev_dbg(&nd_region->dev, "invalid label(s)\n");
		break;
	case -ENODEV:
		dev_dbg(&nd_region->dev, "label not found\n");
		break;
	default:
		dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc);
		break;
	}
	return ERR_PTR(rc);
}
|
|
|
|
|
2015-05-02 01:34:01 +08:00
|
|
|
/*
 * nsblk_add_resource() - append the dpa resource starting at @start to the
 * blk namespace's resource table.
 *
 * Grows nsblk->res (an array of resource pointers) by one slot, then scans
 * @ndd's dpa resources for the one matching this namespace's label-id and
 * @start.  Returns the matched resource, or NULL if the table could not be
 * grown or no matching resource exists.  Note the local @res is reused:
 * first as the krealloc result, then as the dpa-scan cursor.
 */
struct resource *nsblk_add_resource(struct nd_region *nd_region,
		struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
		resource_size_t start)
{
	struct nd_label_id label_id;
	struct resource *res;

	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	res = krealloc(nsblk->res,
			sizeof(void *) * (nsblk->num_resources + 1),
			GFP_KERNEL);
	if (!res)
		return NULL;
	nsblk->res = (struct resource **) res;
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0
				&& res->start == start) {
			nsblk->res[nsblk->num_resources++] = res;
			return res;
		}
	return NULL;
}
|
|
|
|
|
|
|
|
static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
|
|
|
|
{
|
|
|
|
struct nd_namespace_blk *nsblk;
|
|
|
|
struct device *dev;
|
|
|
|
|
|
|
|
if (!is_nd_blk(&nd_region->dev))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
|
|
|
|
if (!nsblk)
|
|
|
|
return NULL;
|
|
|
|
|
2015-06-25 16:20:04 +08:00
|
|
|
dev = &nsblk->common.dev;
|
2015-05-02 01:34:01 +08:00
|
|
|
dev->type = &namespace_blk_device_type;
|
|
|
|
nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
|
|
|
|
if (nsblk->id < 0) {
|
|
|
|
kfree(nsblk);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
|
|
|
|
dev->parent = &nd_region->dev;
|
|
|
|
dev->groups = nd_namespace_attribute_groups;
|
|
|
|
|
2015-06-25 16:20:04 +08:00
|
|
|
return &nsblk->common.dev;
|
2015-05-02 01:34:01 +08:00
|
|
|
}
|
|
|
|
|
2016-10-01 06:28:27 +08:00
|
|
|
static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
|
|
|
|
{
|
|
|
|
struct nd_namespace_pmem *nspm;
|
|
|
|
struct resource *res;
|
|
|
|
struct device *dev;
|
|
|
|
|
2017-05-30 14:12:19 +08:00
|
|
|
if (!is_memory(&nd_region->dev))
|
2016-10-01 06:28:27 +08:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
|
|
|
|
if (!nspm)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
dev = &nspm->nsio.common.dev;
|
|
|
|
dev->type = &namespace_pmem_device_type;
|
|
|
|
dev->parent = &nd_region->dev;
|
|
|
|
res = &nspm->nsio.res;
|
|
|
|
res->name = dev_name(&nd_region->dev);
|
|
|
|
res->flags = IORESOURCE_MEM;
|
|
|
|
|
|
|
|
nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
|
|
|
|
if (nspm->id < 0) {
|
|
|
|
kfree(nspm);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
|
|
|
|
dev->groups = nd_namespace_attribute_groups;
|
|
|
|
nd_namespace_pmem_set_resource(nd_region, nspm, 0);
|
|
|
|
|
|
|
|
return dev;
|
|
|
|
}
|
|
|
|
|
|
|
|
void nd_region_create_ns_seed(struct nd_region *nd_region)
|
2015-05-02 01:34:01 +08:00
|
|
|
{
|
|
|
|
WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
|
2016-10-01 06:28:27 +08:00
|
|
|
|
|
|
|
if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_IO)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (is_nd_blk(&nd_region->dev))
|
|
|
|
nd_region->ns_seed = nd_namespace_blk_create(nd_region);
|
|
|
|
else
|
|
|
|
nd_region->ns_seed = nd_namespace_pmem_create(nd_region);
|
|
|
|
|
2015-05-02 01:34:01 +08:00
|
|
|
/*
|
|
|
|
* Seed creation failures are not fatal, provisioning is simply
|
|
|
|
* disabled until memory becomes available
|
|
|
|
*/
|
|
|
|
if (!nd_region->ns_seed)
|
2016-10-01 06:28:27 +08:00
|
|
|
dev_err(&nd_region->dev, "failed to create %s namespace\n",
|
|
|
|
is_nd_blk(&nd_region->dev) ? "blk" : "pmem");
|
2015-05-02 01:34:01 +08:00
|
|
|
else
|
|
|
|
nd_device_register(nd_region->ns_seed);
|
|
|
|
}
|
|
|
|
|
2016-03-12 02:15:36 +08:00
|
|
|
void nd_region_create_dax_seed(struct nd_region *nd_region)
|
|
|
|
{
|
|
|
|
WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
|
|
|
|
nd_region->dax_seed = nd_dax_create(nd_region);
|
|
|
|
/*
|
|
|
|
* Seed creation failures are not fatal, provisioning is simply
|
|
|
|
* disabled until memory becomes available
|
|
|
|
*/
|
|
|
|
if (!nd_region->dax_seed)
|
|
|
|
dev_err(&nd_region->dev, "failed to create dax namespace\n");
|
|
|
|
}
|
|
|
|
|
2015-12-14 03:41:36 +08:00
|
|
|
void nd_region_create_pfn_seed(struct nd_region *nd_region)
|
|
|
|
{
|
|
|
|
WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
|
|
|
|
nd_region->pfn_seed = nd_pfn_create(nd_region);
|
|
|
|
/*
|
|
|
|
* Seed creation failures are not fatal, provisioning is simply
|
|
|
|
* disabled until memory becomes available
|
|
|
|
*/
|
|
|
|
if (!nd_region->pfn_seed)
|
|
|
|
dev_err(&nd_region->dev, "failed to create pfn namespace\n");
|
|
|
|
}
|
|
|
|
|
2015-06-25 16:20:04 +08:00
|
|
|
void nd_region_create_btt_seed(struct nd_region *nd_region)
|
|
|
|
{
|
|
|
|
WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
|
|
|
|
nd_region->btt_seed = nd_btt_create(nd_region);
|
|
|
|
/*
|
|
|
|
* Seed creation failures are not fatal, provisioning is simply
|
|
|
|
* disabled until memory becomes available
|
|
|
|
*/
|
|
|
|
if (!nd_region->btt_seed)
|
|
|
|
dev_err(&nd_region->dev, "failed to create btt namespace\n");
|
|
|
|
}
|
|
|
|
|
2016-09-23 06:42:59 +08:00
|
|
|
/*
 * add_namespace_resource - merge a label into an already-discovered
 * namespace, if its uuid matches one of @devs.
 * @nd_region: region being scanned
 * @nd_label: raw on-media label under consideration
 * @devs: NULL-terminated-by-construction array of namespaces found so far
 * @count: number of valid entries in @devs
 *
 * Returns the index of the matching namespace (the caller treats
 * "i < count" as "label absorbed"), @count when no namespace matched,
 * or -ENXIO on error.  For blk namespaces a matching label contributes
 * another dpa extent via nsblk_add_resource(); for any other namespace
 * type a second label with the same uuid is a conflict.
 */
static int add_namespace_resource(struct nd_region *nd_region,
		struct nd_namespace_label *nd_label, struct device **devs,
		int count)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	int i;

	for (i = 0; i < count; i++) {
		u8 *uuid = namespace_to_uuid(devs[i]);
		struct resource *res;

		/* devs[] should only hold uuid-bearing namespace types */
		if (IS_ERR_OR_NULL(uuid)) {
			WARN_ON(1);
			continue;
		}

		if (memcmp(uuid, nd_label->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		if (is_namespace_blk(devs[i])) {
			res = nsblk_add_resource(nd_region, ndd,
					to_nd_namespace_blk(devs[i]),
					__le64_to_cpu(nd_label->dpa));
			if (!res)
				return -ENXIO;
			nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count);
		} else {
			/* duplicate uuid on a non-blk namespace: invalid */
			dev_err(&nd_region->dev,
					"error: conflicting extents for uuid: %pUb\n",
					nd_label->uuid);
			return -ENXIO;
		}
		break;
	}

	/* i == count when no existing namespace claimed this label */
	return i;
}
|
|
|
|
|
2017-10-05 17:55:57 +08:00
|
|
|
/*
 * create_namespace_blk - instantiate a blk namespace device from its
 * first on-media label.
 * @nd_region: region owning the label
 * @nd_label: label providing uuid, lbasize, dpa and (optionally) name
 * @count: current namespace count, used only for debug output
 *
 * Returns the new (unregistered) device on success, ERR_PTR(-EAGAIN)
 * when the label does not belong to this interleave set (caller skips
 * it), ERR_PTR(-ENOMEM) on allocation failure, or ERR_PTR(-ENXIO) on
 * other initialization failures.
 */
static struct device *create_namespace_blk(struct nd_region *nd_region,
		struct nd_namespace_label *nd_label, int count)
{

	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_blk *nsblk;
	char name[NSLABEL_NAME_LEN];
	struct device *dev = NULL;
	struct resource *res;

	/* v1.2 labels carry set-identity fields that must match the region */
	if (namespace_label_has(ndd, type_guid)) {
		if (!guid_equal(&nd_set->type_guid, &nd_label->type_guid)) {
			dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
					&nd_set->type_guid,
					&nd_label->type_guid);
			return ERR_PTR(-EAGAIN);
		}

		if (nd_label->isetcookie != __cpu_to_le64(nd_set->cookie2)) {
			dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n",
					nd_set->cookie2,
					__le64_to_cpu(nd_label->isetcookie));
			return ERR_PTR(-EAGAIN);
		}
	}

	nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
	if (!nsblk)
		return ERR_PTR(-ENOMEM);
	dev = &nsblk->common.dev;
	dev->type = &namespace_blk_device_type;
	dev->parent = &nd_region->dev;
	/* id assigned later at registration time; -1 marks "unset" */
	nsblk->id = -1;
	nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
	nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
			GFP_KERNEL);
	if (namespace_label_has(ndd, abstraction_guid))
		nsblk->common.claim_class
			= to_nvdimm_cclass(&nd_label->abstraction_guid);
	if (!nsblk->uuid)
		goto blk_err;
	memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
	/* a leading NUL means the label carries no alt_name */
	if (name[0]) {
		nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
				GFP_KERNEL);
		if (!nsblk->alt_name)
			goto blk_err;
	}
	res = nsblk_add_resource(nd_region, ndd, nsblk,
			__le64_to_cpu(nd_label->dpa));
	if (!res)
		goto blk_err;
	nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count);
	return dev;
blk_err:
	/* releases uuid/alt_name/res array along with nsblk itself */
	namespace_blk_release(dev);
	return ERR_PTR(-ENXIO);
}
|
|
|
|
|
2016-10-06 05:04:15 +08:00
|
|
|
static int cmp_dpa(const void *a, const void *b)
|
|
|
|
{
|
|
|
|
const struct device *dev_a = *(const struct device **) a;
|
|
|
|
const struct device *dev_b = *(const struct device **) b;
|
|
|
|
struct nd_namespace_blk *nsblk_a, *nsblk_b;
|
|
|
|
struct nd_namespace_pmem *nspm_a, *nspm_b;
|
|
|
|
|
|
|
|
if (is_namespace_io(dev_a))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (is_namespace_blk(dev_a)) {
|
|
|
|
nsblk_a = to_nd_namespace_blk(dev_a);
|
|
|
|
nsblk_b = to_nd_namespace_blk(dev_b);
|
|
|
|
|
|
|
|
return memcmp(&nsblk_a->res[0]->start, &nsblk_b->res[0]->start,
|
|
|
|
sizeof(resource_size_t));
|
|
|
|
}
|
|
|
|
|
|
|
|
nspm_a = to_nd_namespace_pmem(dev_a);
|
|
|
|
nspm_b = to_nd_namespace_pmem(dev_b);
|
|
|
|
|
|
|
|
return memcmp(&nspm_a->nsio.res.start, &nspm_b->nsio.res.start,
|
|
|
|
sizeof(resource_size_t));
|
|
|
|
}
|
|
|
|
|
2016-09-23 06:42:59 +08:00
|
|
|
/*
 * scan_labels - walk the labels of mapping[0] and materialize namespace
 * devices for @nd_region.
 *
 * Returns a NULL-terminated array of unregistered namespace devices
 * (ownership transfers to the caller), or NULL on failure.  When no
 * labels produce a namespace, a single zero-sized seed namespace is
 * returned instead so userspace can provision the region.  Caller must
 * hold all nd_mapping locks (see create_namespaces()).
 */
static struct device **scan_labels(struct nd_region *nd_region)
{
	int i, count = 0;
	struct device *dev, **devs = NULL;
	struct nd_label_ent *label_ent, *e;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;

	/* "safe" because create_namespace_pmem() might list_move() label_ent */
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;
		struct device **__devs;
		u32 flags;

		if (!nd_label)
			continue;
		flags = __le32_to_cpu(nd_label->flags);
		/* blk regions take LOCAL labels, pmem regions take the rest */
		if (is_nd_blk(&nd_region->dev)
				== !!(flags & NSLABEL_FLAG_LOCAL))
			/* pass, region matches label type */;
		else
			continue;

		/* skip labels that describe extents outside of the region */
		if (__le64_to_cpu(nd_label->dpa) < nd_mapping->start ||
		    __le64_to_cpu(nd_label->dpa) > map_end)
			continue;

		/* first, try to fold the label into an existing namespace */
		i = add_namespace_resource(nd_region, nd_label, devs, count);
		if (i < 0)
			goto err;
		if (i < count)
			continue;
		/* grow devs[]; +2 keeps a NULL terminator after the append */
		__devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
		if (!__devs)
			goto err;
		memcpy(__devs, devs, sizeof(dev) * count);
		kfree(devs);
		devs = __devs;

		if (is_nd_blk(&nd_region->dev))
			dev = create_namespace_blk(nd_region, nd_label, count);
		else {
			struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
			struct nd_namespace_index *nsindex;

			nsindex = to_namespace_index(ndd, ndd->ns_current);
			dev = create_namespace_pmem(nd_region, nsindex, nd_label);
		}

		if (IS_ERR(dev)) {
			switch (PTR_ERR(dev)) {
			case -EAGAIN:
				/* skip invalid labels */
				continue;
			case -ENODEV:
				/* fallthrough to seed creation */
				break;
			default:
				goto err;
			}
		} else
			devs[count++] = dev;

	}

	dev_dbg(&nd_region->dev, "discovered %d %s namespace%s\n",
			count, is_nd_blk(&nd_region->dev)
			? "blk" : "pmem", count == 1 ? "" : "s");

	if (count == 0) {
		/* Publish a zero-sized namespace for userspace to configure. */
		nd_mapping_free_labels(nd_mapping);

		devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
		if (!devs)
			goto err;
		if (is_nd_blk(&nd_region->dev)) {
			struct nd_namespace_blk *nsblk;

			nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
			if (!nsblk)
				goto err;
			dev = &nsblk->common.dev;
			dev->type = &namespace_blk_device_type;
		} else {
			struct nd_namespace_pmem *nspm;

			nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
			if (!nspm)
				goto err;
			dev = &nspm->nsio.common.dev;
			dev->type = &namespace_pmem_device_type;
			nd_namespace_pmem_set_resource(nd_region, nspm, 0);
		}
		dev->parent = &nd_region->dev;
		devs[count++] = dev;
	} else if (is_memory(&nd_region->dev)) {
		/* clean unselected labels */
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct list_head *l, *e;
			LIST_HEAD(list);
			int j;

			nd_mapping = &nd_region->mapping[i];
			if (list_empty(&nd_mapping->labels)) {
				WARN_ON(1);
				continue;
			}

			/*
			 * Keep the first @count entries (the selected
			 * labels, moved to the list head by
			 * create_namespace_pmem) and free the rest.
			 */
			j = count;
			list_for_each_safe(l, e, &nd_mapping->labels) {
				if (!j--)
					break;
				list_move_tail(l, &list);
			}
			nd_mapping_free_labels(nd_mapping);
			list_splice_init(&list, &nd_mapping->labels);
		}
	}

	if (count > 1)
		sort(devs, count, sizeof(struct device *), cmp_dpa, NULL);

	return devs;

err:
	/* devs[] is NULL terminated, release every namespace created so far */
	if (devs) {
		for (i = 0; devs[i]; i++)
			if (is_nd_blk(&nd_region->dev))
				namespace_blk_release(devs[i]);
			else
				namespace_pmem_release(devs[i]);
		kfree(devs);
	}
	return NULL;
}
|
|
|
|
|
2016-09-23 06:42:59 +08:00
|
|
|
/*
 * create_namespaces - lock all mappings and scan their labels into
 * namespace devices.
 *
 * Returns the device array from scan_labels() (ownership transfers to
 * the caller), or NULL when the region has no mappings or the scan
 * fails.  mutex_lock_nested() with the mapping index as the subclass
 * keeps lockdep happy about taking multiple nd_mapping locks; they are
 * released in reverse order.
 */
static struct device **create_namespaces(struct nd_region *nd_region)
{
	struct nd_mapping *nd_mapping;
	struct device **devs;
	int i;

	if (nd_region->ndr_mappings == 0)
		return NULL;

	/* lock down all mappings while we scan labels */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		mutex_lock_nested(&nd_mapping->lock, i);
	}

	devs = scan_labels(nd_region);

	/* unlock in reverse acquisition order */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		int reverse = nd_region->ndr_mappings - 1 - i;

		nd_mapping = &nd_region->mapping[reverse];
		mutex_unlock(&nd_mapping->lock);
	}

	return devs;
}
|
|
|
|
|
2019-09-05 23:45:57 +08:00
|
|
|
/*
 * deactivate_labels - undo init_active_labels() for every mapping.
 * @region: struct nd_region *, typed as void * for use as a devm action.
 *
 * Frees each mapping's label list, drops the ndd reference taken by
 * init_active_labels(), and rebalances the dimm busy count.  Safe to
 * call on a partially initialized region: mappings that never got an
 * ndd (nd_mapping->ndd == NULL) skip the busy decrement.
 */
static void deactivate_labels(void *region)
{
	struct nd_region *nd_region = region;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = nd_mapping->ndd;
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		mutex_lock(&nd_mapping->lock);
		nd_mapping_free_labels(nd_mapping);
		mutex_unlock(&nd_mapping->lock);

		/* put_ndd(NULL) is a no-op, so no guard needed here */
		put_ndd(ndd);
		nd_mapping->ndd = NULL;
		/* only decrement busy if this mapping was activated */
		if (ndd)
			atomic_dec(&nvdimm->busy);
	}
}
|
|
|
|
|
2015-06-18 05:14:46 +08:00
|
|
|
/*
 * init_active_labels - read each dimm's active labels into per-mapping
 * label lists and pin the dimms busy.
 *
 * Returns 0 on success (with a devm action registered to undo the
 * work), -ENXIO when a mapping's dimm is locked or disabled in a way
 * that makes the region unusable, or -ENOMEM when label bookkeeping
 * allocation fails part-way through.  On any failure the labels and
 * references taken so far are released via deactivate_labels().
 */
static int init_active_labels(struct nd_region *nd_region)
{
	int i, rc = 0;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		struct nd_label_ent *label_ent;
		int count, j;

		/*
		 * If the dimm is disabled then we may need to prevent
		 * the region from being activated.
		 */
		if (!ndd) {
			if (test_bit(NDD_LOCKED, &nvdimm->flags))
				/* fail, label data may be unreadable */;
			else if (test_bit(NDD_ALIASING, &nvdimm->flags))
				/* fail, labels needed to disambiguate dpa */;
			else
				continue;

			dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
					dev_name(&nd_mapping->nvdimm->dev),
					test_bit(NDD_LOCKED, &nvdimm->flags)
					? "locked" : "disabled");
			rc = -ENXIO;
			goto out;
		}
		/* pin the dimm for the lifetime of the label list */
		nd_mapping->ndd = ndd;
		atomic_inc(&nvdimm->busy);
		get_ndd(ndd);

		count = nd_label_active_count(ndd);
		dev_dbg(ndd->dev, "count: %d\n", count);
		if (!count)
			continue;
		for (j = 0; j < count; j++) {
			struct nd_namespace_label *label;

			label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
			if (!label_ent)
				break;
			label = nd_label_active(ndd, j);
			/* NDD_NOBLK dimms cannot honor blk (LOCAL) labels */
			if (test_bit(NDD_NOBLK, &nvdimm->flags)) {
				u32 flags = __le32_to_cpu(label->flags);

				flags &= ~NSLABEL_FLAG_LOCAL;
				label->flags = __cpu_to_le32(flags);
			}
			label_ent->label = label;

			mutex_lock(&nd_mapping->lock);
			list_add_tail(&label_ent->list, &nd_mapping->labels);
			mutex_unlock(&nd_mapping->lock);
		}

		/* allocation failed mid-list; abort the outer loop too */
		if (j < count)
			break;
	}

	/* an early break above means label_ent allocation failed */
	if (i < nd_region->ndr_mappings)
		rc = -ENOMEM;

out:
	if (rc) {
		deactivate_labels(nd_region);
		return rc;
	}

	/* arrange automatic teardown when the region device is released */
	return devm_add_action_or_reset(&nd_region->dev, deactivate_labels,
			nd_region);
}
|
|
|
|
|
2015-06-01 03:02:11 +08:00
|
|
|
/*
 * nd_region_register_namespaces - discover and register all namespaces
 * of @nd_region.
 * @nd_region: region to scan
 * @err: out parameter; number of namespaces that failed to register
 *
 * Returns the number of successfully registered namespaces, a negative
 * errno when label initialization fails, -ENODEV when no namespaces
 * could be created, or -ENODEV when every created namespace failed to
 * register.  Runs the label scan under the nvdimm bus lock.
 */
int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
{
	struct device **devs = NULL;
	int i, rc = 0, type;

	*err = 0;
	nvdimm_bus_lock(&nd_region->dev);
	rc = init_active_labels(nd_region);
	if (rc) {
		nvdimm_bus_unlock(&nd_region->dev);
		return rc;
	}

	type = nd_region_to_nstype(nd_region);
	switch (type) {
	case ND_DEVICE_NAMESPACE_IO:
		devs = create_namespace_io(nd_region);
		break;
	case ND_DEVICE_NAMESPACE_PMEM:
	case ND_DEVICE_NAMESPACE_BLK:
		devs = create_namespaces(nd_region);
		break;
	default:
		break;
	}
	nvdimm_bus_unlock(&nd_region->dev);

	if (!devs)
		return -ENODEV;

	/* devs[] is NULL terminated; assign ids and register each device */
	for (i = 0; devs[i]; i++) {
		struct device *dev = devs[i];
		int id;

		if (type == ND_DEVICE_NAMESPACE_BLK) {
			struct nd_namespace_blk *nsblk;

			nsblk = to_nd_namespace_blk(dev);
			id = ida_simple_get(&nd_region->ns_ida, 0, 0,
					GFP_KERNEL);
			nsblk->id = id;
		} else if (type == ND_DEVICE_NAMESPACE_PMEM) {
			struct nd_namespace_pmem *nspm;

			nspm = to_nd_namespace_pmem(dev);
			id = ida_simple_get(&nd_region->ns_ida, 0, 0,
					GFP_KERNEL);
			nspm->id = id;
		} else
			id = i;

		/* ida exhaustion: stop and release the remainder below */
		if (id < 0)
			break;
		dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
		dev->groups = nd_namespace_attribute_groups;
		nd_device_register(dev);
	}
	if (i)
		nd_region->ns_seed = devs[0];

	/* release any devices that were created but never registered */
	if (devs[i]) {
		int j;

		for (j = i; devs[j]; j++) {
			struct device *dev = devs[j];

			/* initialize + put to trigger the release path */
			device_initialize(dev);
			put_device(dev);
		}
		*err = j - i;
		/*
		 * All of the namespaces we tried to register failed, so
		 * fail region activation.
		 */
		if (*err == 0)
			rc = -ENODEV;
	}
	kfree(devs);

	if (rc == -ENODEV)
		return rc;

	return i;
}
|