2015-05-20 10:54:31 +08:00
|
|
|
/*
|
|
|
|
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of version 2 of the GNU General Public License as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful, but
|
|
|
|
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* General Public License for more details.
|
|
|
|
*/
|
|
|
|
#ifndef __ND_CORE_H__
|
|
|
|
#define __ND_CORE_H__
|
|
|
|
#include <linux/libnvdimm.h>
|
|
|
|
#include <linux/device.h>
|
2015-05-02 01:11:27 +08:00
|
|
|
#include <linux/sizes.h>
|
|
|
|
#include <linux/mutex.h>
|
2015-05-02 01:34:01 +08:00
|
|
|
#include <linux/nd.h>
|
2015-05-20 10:54:31 +08:00
|
|
|
|
2015-04-25 15:56:17 +08:00
|
|
|
extern struct list_head nvdimm_bus_list;
|
|
|
|
extern struct mutex nvdimm_bus_list_mutex;
|
2015-06-09 02:27:06 +08:00
|
|
|
extern int nvdimm_major;
|
2015-04-25 15:56:17 +08:00
|
|
|
|
2015-05-20 10:54:31 +08:00
|
|
|
/*
 * struct nvdimm_bus - anchor object for one NVDIMM bus instance
 * @nd_desc: provider-supplied bus descriptor (command callbacks, etc.)
 * @probe_wait: waitqueue, presumably for synchronizing with async device
 *		probing (see @probe_active) — TODO confirm against bus.c
 * @list: node, presumably linking into the global nvdimm_bus_list declared
 *	  above — verify against registration code
 * @dev: driver-core anchor device for the bus
 * @id: bus instance id; @probe_active: count of in-flight probes (assumed
 *	from the probe_wait pairing — confirm)
 * @mapping_list: list of dimm mappings associated with this bus
 * @reconfig_mutex: serializes bus reconfiguration operations
 * @badrange: tracking of known-bad media address ranges
 */
struct nvdimm_bus {
	struct nvdimm_bus_descriptor *nd_desc;
	wait_queue_head_t probe_wait;
	struct list_head list;
	struct device dev;
	int id, probe_active;
	struct list_head mapping_list;
	struct mutex reconfig_mutex;
	struct badrange badrange;
};
|
2015-04-27 07:26:48 +08:00
|
|
|
|
2015-04-25 15:56:17 +08:00
|
|
|
/*
 * struct nvdimm - core state for one memory device on an nvdimm_bus
 * @flags: dimm state/property flags
 * @provider_data: opaque cookie owned by the bus provider
 * @cmd_mask: mask of commands the dimm supports (assumed from the name —
 *	      TODO confirm semantics against the provider/ioctl path)
 * @dev: driver-core anchor device for the dimm
 * @busy: atomic counter; presumably tracks in-flight operations or claims —
 *	  verify against dimm_devs.c
 * @id: dimm instance id; @num_flush: number of entries backing @flush_wpq
 *	(assumed pairing — confirm)
 * @flush_wpq: resource array, presumably write-pending-queue flush hint
 *	       addresses — TODO confirm
 */
struct nvdimm {
	unsigned long flags;
	void *provider_data;
	unsigned long cmd_mask;
	struct device dev;
	atomic_t busy;
	int id, num_flush;
	struct resource *flush_wpq;
};
|
|
|
|
|
2016-10-05 07:09:59 +08:00
|
|
|
/**
 * struct blk_alloc_info - tracking info for BLK dpa scanning
 * @nd_mapping: blk region mapping boundaries
 * @available: decremented in alias_dpa_busy as aliased PMEM is scanned
 * @busy: decremented in blk_dpa_busy to account for ranges already
 * handled by alias_dpa_busy
 * @res: alias_dpa_busy interprets this a free space range that needs to
 * be truncated to the valid BLK allocation starting DPA, blk_dpa_busy
 * treats it as a busy range that needs the aliased PMEM ranges
 * truncated.
 */
struct blk_alloc_info {
	struct nd_mapping *nd_mapping;		/* see kernel-doc above */
	resource_size_t available, busy;	/* running accounting totals */
	struct resource *res;			/* dual-interpretation range, see above */
};
|
|
|
|
|
2015-06-01 03:02:11 +08:00
|
|
|
bool is_nvdimm(struct device *dev);
|
|
|
|
bool is_nd_pmem(struct device *dev);
|
2017-05-30 14:12:19 +08:00
|
|
|
bool is_nd_volatile(struct device *dev);
|
2015-06-25 16:21:02 +08:00
|
|
|
bool is_nd_blk(struct device *dev);
|
2017-05-30 14:12:19 +08:00
|
|
|
static inline bool is_nd_region(struct device *dev)
|
|
|
|
{
|
|
|
|
return is_nd_pmem(dev) || is_nd_blk(dev) || is_nd_volatile(dev);
|
|
|
|
}
|
|
|
|
static inline bool is_memory(struct device *dev)
|
|
|
|
{
|
|
|
|
return is_nd_pmem(dev) || is_nd_volatile(dev);
|
|
|
|
}
|
2015-04-25 15:56:17 +08:00
|
|
|
struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev);
|
2015-04-27 07:26:48 +08:00
|
|
|
int __init nvdimm_bus_init(void);
|
2015-06-01 02:41:48 +08:00
|
|
|
void nvdimm_bus_exit(void);
|
2016-05-18 11:24:16 +08:00
|
|
|
void nvdimm_devs_exit(void);
|
|
|
|
void nd_region_devs_exit(void);
|
2015-05-02 01:11:27 +08:00
|
|
|
void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev);
|
2015-05-02 01:34:01 +08:00
|
|
|
struct nd_region;
|
2016-10-01 06:28:27 +08:00
|
|
|
void nd_region_create_ns_seed(struct nd_region *nd_region);
|
2015-06-25 16:20:04 +08:00
|
|
|
void nd_region_create_btt_seed(struct nd_region *nd_region);
|
2015-12-14 03:41:36 +08:00
|
|
|
void nd_region_create_pfn_seed(struct nd_region *nd_region);
|
2016-03-12 02:15:36 +08:00
|
|
|
void nd_region_create_dax_seed(struct nd_region *nd_region);
|
2015-05-02 01:11:27 +08:00
|
|
|
void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev);
|
2015-04-27 07:26:48 +08:00
|
|
|
int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus);
|
|
|
|
void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus);
|
2015-06-01 02:41:48 +08:00
|
|
|
void nd_synchronize(void);
|
|
|
|
int nvdimm_bus_register_dimms(struct nvdimm_bus *nvdimm_bus);
|
|
|
|
int nvdimm_bus_register_regions(struct nvdimm_bus *nvdimm_bus);
|
2015-05-02 01:11:27 +08:00
|
|
|
int nvdimm_bus_init_interleave_sets(struct nvdimm_bus *nvdimm_bus);
|
2015-06-25 16:20:04 +08:00
|
|
|
void __nd_device_register(struct device *dev);
|
|
|
|
int nd_match_dimm(struct device *dev, void *data);
|
2015-06-18 05:14:46 +08:00
|
|
|
struct nd_label_id;
|
|
|
|
char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags);
|
|
|
|
bool nd_is_uuid_unique(struct device *dev, u8 *uuid);
|
|
|
|
struct nd_region;
|
|
|
|
struct nvdimm_drvdata;
|
|
|
|
struct nd_mapping;
|
2016-09-20 07:04:21 +08:00
|
|
|
void nd_mapping_free_labels(struct nd_mapping *nd_mapping);
|
2018-07-25 05:07:57 +08:00
|
|
|
|
|
|
|
int __reserve_free_pmem(struct device *dev, void *data);
|
|
|
|
void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
|
|
|
|
struct nd_mapping *nd_mapping);
|
|
|
|
|
|
|
|
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
|
|
|
|
struct nd_mapping *nd_mapping);
|
|
|
|
resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region);
|
2015-06-18 05:14:46 +08:00
|
|
|
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
|
|
|
|
struct nd_mapping *nd_mapping, resource_size_t *overlap);
|
2016-10-01 08:28:58 +08:00
|
|
|
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
|
2015-06-18 05:14:46 +08:00
|
|
|
resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
|
|
|
|
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
|
|
|
|
struct nd_label_id *label_id);
|
2016-10-05 07:09:59 +08:00
|
|
|
int alias_dpa_busy(struct device *dev, void *data);
|
2015-05-02 01:34:01 +08:00
|
|
|
struct resource *nsblk_add_resource(struct nd_region *nd_region,
|
|
|
|
struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
|
|
|
|
resource_size_t start);
|
2015-05-31 00:35:36 +08:00
|
|
|
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd);
|
2015-06-18 05:14:46 +08:00
|
|
|
void get_ndd(struct nvdimm_drvdata *ndd);
|
2015-06-25 16:20:04 +08:00
|
|
|
resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
|
2015-07-31 05:57:47 +08:00
|
|
|
void nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns);
|
|
|
|
void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns);
|
|
|
|
bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
|
|
|
|
struct nd_namespace_common **_ndns);
|
|
|
|
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
|
|
|
|
struct nd_namespace_common **_ndns);
|
|
|
|
ssize_t nd_namespace_store(struct device *dev,
|
|
|
|
struct nd_namespace_common **_ndns, const char *buf,
|
|
|
|
size_t len);
|
2016-05-22 03:22:41 +08:00
|
|
|
struct nd_pfn *to_nd_pfn_safe(struct device *dev);
|
2015-05-20 10:54:31 +08:00
|
|
|
#endif /* __ND_CORE_H__ */
|