2019-02-18 18:36:11 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2016-06-22 00:04:20 +08:00
|
|
|
/*
|
|
|
|
* Configfs interface for the NVMe target.
|
|
|
|
* Copyright (c) 2015-2016 HGST, a Western Digital Company.
|
|
|
|
*/
|
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/stat.h>
|
|
|
|
#include <linux/ctype.h>
|
2018-10-05 05:27:47 +08:00
|
|
|
#include <linux/pci.h>
|
|
|
|
#include <linux/pci-p2pdma.h>
|
2016-06-22 00:04:20 +08:00
|
|
|
|
|
|
|
#include "nvmet.h"
|
|
|
|
|
2017-10-16 23:18:47 +08:00
|
|
|
/*
 * Forward declarations: the symlink handlers below check item types
 * against these before casting.
 */
static const struct config_item_type nvmet_host_type;
static const struct config_item_type nvmet_subsys_type;

/* List of all configured ports; exported to the core as nvmet_ports. */
static LIST_HEAD(nvmet_ports_list);
struct list_head *nvmet_ports = &nvmet_ports_list;
|
|
|
|
|
2018-03-21 03:41:34 +08:00
|
|
|
/* Mapping between NVMf transport type values and their configfs names. */
static const struct nvmet_transport_name {
	u8 type;
	const char *name;
} nvmet_transport_names[] = {
	{ NVMF_TRTYPE_RDMA, "rdma" },
	{ NVMF_TRTYPE_FC, "fc" },
	{ NVMF_TRTYPE_TCP, "tcp" },
	{ NVMF_TRTYPE_LOOP, "loop" },
};
|
|
|
|
|
2016-06-22 00:04:20 +08:00
|
|
|
/*
|
|
|
|
* nvmet_port Generic ConfigFS definitions.
|
|
|
|
* Used in any place in the ConfigFS tree that refers to an address.
|
|
|
|
*/
|
|
|
|
static ssize_t nvmet_addr_adrfam_show(struct config_item *item,
|
|
|
|
char *page)
|
|
|
|
{
|
|
|
|
switch (to_nvmet_port(item)->disc_addr.adrfam) {
|
|
|
|
case NVMF_ADDR_FAMILY_IP4:
|
|
|
|
return sprintf(page, "ipv4\n");
|
|
|
|
case NVMF_ADDR_FAMILY_IP6:
|
|
|
|
return sprintf(page, "ipv6\n");
|
|
|
|
case NVMF_ADDR_FAMILY_IB:
|
|
|
|
return sprintf(page, "ib\n");
|
2016-10-22 04:32:51 +08:00
|
|
|
case NVMF_ADDR_FAMILY_FC:
|
|
|
|
return sprintf(page, "fc\n");
|
2016-06-22 00:04:20 +08:00
|
|
|
default:
|
|
|
|
return sprintf(page, "\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Set the discovery address family of the port from its textual name.
 * Rejected with -EACCES while the port is enabled.
 */
static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
		const char *page, size_t count)
{
	static const struct {
		const char *name;
		u8 value;
	} families[] = {
		{ "ipv4", NVMF_ADDR_FAMILY_IP4 },
		{ "ipv6", NVMF_ADDR_FAMILY_IP6 },
		{ "ib",   NVMF_ADDR_FAMILY_IB },
		{ "fc",   NVMF_ADDR_FAMILY_FC },
	};
	struct nvmet_port *port = to_nvmet_port(item);
	int i;

	if (port->enabled) {
		pr_err("Cannot modify address while enabled\n");
		pr_err("Disable the address before modifying\n");
		return -EACCES;
	}

	for (i = 0; i < ARRAY_SIZE(families); i++) {
		if (sysfs_streq(page, families[i].name)) {
			port->disc_addr.adrfam = families[i].value;
			return count;
		}
	}

	pr_err("Invalid value '%s' for adrfam\n", page);
	return -EINVAL;
}

CONFIGFS_ATTR(nvmet_, addr_adrfam);
|
|
|
|
|
|
|
|
static ssize_t nvmet_addr_portid_show(struct config_item *item,
|
|
|
|
char *page)
|
|
|
|
{
|
|
|
|
struct nvmet_port *port = to_nvmet_port(item);
|
|
|
|
|
|
|
|
return snprintf(page, PAGE_SIZE, "%d\n",
|
|
|
|
le16_to_cpu(port->disc_addr.portid));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Set the port ID advertised in the discovery log entry.  Stored
 * little-endian; rejected while the port is enabled.
 */
static ssize_t nvmet_addr_portid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	u16 portid = 0;
	int ret;

	ret = kstrtou16(page, 0, &portid);
	if (ret) {
		pr_err("Invalid value '%s' for portid\n", page);
		return -EINVAL;
	}

	if (port->enabled) {
		pr_err("Cannot modify address while enabled\n");
		pr_err("Disable the address before modifying\n");
		return -EACCES;
	}

	port->disc_addr.portid = cpu_to_le16(portid);
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_portid);
|
|
|
|
|
|
|
|
static ssize_t nvmet_addr_traddr_show(struct config_item *item,
|
|
|
|
char *page)
|
|
|
|
{
|
|
|
|
struct nvmet_port *port = to_nvmet_port(item);
|
|
|
|
|
|
|
|
return snprintf(page, PAGE_SIZE, "%s\n",
|
|
|
|
port->disc_addr.traddr);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Set the transport address of the port.  Rejected while the port is
 * enabled.
 *
 * Fix: the previous sscanf(page, "%s", ...) copy could store up to
 * NVMF_TRADDR_SIZE characters plus a NUL terminator when a
 * whitespace-free write of exactly NVMF_TRADDR_SIZE bytes passed the
 * "count > NVMF_TRADDR_SIZE" guard, overflowing disc_addr.traddr by one
 * byte.  Copy a bounded, newline-terminated token instead so the
 * terminator always fits inside the buffer.
 */
static ssize_t nvmet_addr_traddr_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	size_t len;

	if (count > NVMF_TRADDR_SIZE) {
		pr_err("Invalid value '%s' for traddr\n", page);
		return -EINVAL;
	}

	if (port->enabled) {
		pr_err("Cannot modify address while enabled\n");
		pr_err("Disable the address before modifying\n");
		return -EACCES;
	}

	len = strcspn(page, "\n");
	if (!len || len >= sizeof(port->disc_addr.traddr)) {
		pr_err("Invalid value '%s' for traddr\n", page);
		return -EINVAL;
	}

	memcpy(port->disc_addr.traddr, page, len);
	port->disc_addr.traddr[len] = '\0';
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_traddr);
|
|
|
|
|
|
|
|
static ssize_t nvmet_addr_treq_show(struct config_item *item,
|
|
|
|
char *page)
|
|
|
|
{
|
2018-11-20 06:11:13 +08:00
|
|
|
switch (to_nvmet_port(item)->disc_addr.treq &
|
|
|
|
NVME_TREQ_SECURE_CHANNEL_MASK) {
|
2016-06-22 00:04:20 +08:00
|
|
|
case NVMF_TREQ_NOT_SPECIFIED:
|
|
|
|
return sprintf(page, "not specified\n");
|
|
|
|
case NVMF_TREQ_REQUIRED:
|
|
|
|
return sprintf(page, "required\n");
|
|
|
|
case NVMF_TREQ_NOT_REQUIRED:
|
|
|
|
return sprintf(page, "not required\n");
|
|
|
|
default:
|
|
|
|
return sprintf(page, "\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Set the secure-channel requirement (TREQ) of the port from its
 * textual name.  Bits outside NVME_TREQ_SECURE_CHANNEL_MASK are
 * preserved across the update.  Rejected while the port is enabled.
 */
static ssize_t nvmet_addr_treq_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	/* Start from the current value with the secure-channel bits cleared. */
	u8 treq = port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK;

	if (port->enabled) {
		pr_err("Cannot modify address while enabled\n");
		pr_err("Disable the address before modifying\n");
		return -EACCES;
	}

	if (sysfs_streq(page, "not specified")) {
		treq |= NVMF_TREQ_NOT_SPECIFIED;
	} else if (sysfs_streq(page, "required")) {
		treq |= NVMF_TREQ_REQUIRED;
	} else if (sysfs_streq(page, "not required")) {
		treq |= NVMF_TREQ_NOT_REQUIRED;
	} else {
		pr_err("Invalid value '%s' for treq\n", page);
		return -EINVAL;
	}
	port->disc_addr.treq = treq;

	return count;
}

CONFIGFS_ATTR(nvmet_, addr_treq);
|
|
|
|
|
|
|
|
static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
|
|
|
|
char *page)
|
|
|
|
{
|
|
|
|
struct nvmet_port *port = to_nvmet_port(item);
|
|
|
|
|
|
|
|
return snprintf(page, PAGE_SIZE, "%s\n",
|
|
|
|
port->disc_addr.trsvcid);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Set the transport service identifier of the port.  Rejected while the
 * port is enabled.
 *
 * Fix: the previous sscanf(page, "%s", ...) copy could store up to
 * NVMF_TRSVCID_SIZE characters plus a NUL terminator when a
 * whitespace-free write of exactly NVMF_TRSVCID_SIZE bytes passed the
 * "count > NVMF_TRSVCID_SIZE" guard, overflowing disc_addr.trsvcid by
 * one byte.  Copy a bounded, newline-terminated token instead so the
 * terminator always fits inside the buffer.
 */
static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	size_t len;

	if (count > NVMF_TRSVCID_SIZE) {
		pr_err("Invalid value '%s' for trsvcid\n", page);
		return -EINVAL;
	}

	if (port->enabled) {
		pr_err("Cannot modify address while enabled\n");
		pr_err("Disable the address before modifying\n");
		return -EACCES;
	}

	len = strcspn(page, "\n");
	if (!len || len >= sizeof(port->disc_addr.trsvcid)) {
		pr_err("Invalid value '%s' for trsvcid\n", page);
		return -EINVAL;
	}

	memcpy(port->disc_addr.trsvcid, page, len);
	port->disc_addr.trsvcid[len] = '\0';
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_trsvcid);
|
|
|
|
|
nvmet-rdma: support max(16KB, PAGE_SIZE) inline data
The patch enables inline data sizes using up to 4 recv sges, and capping
the size at 16KB or at least 1 page size. So on a 4K page system, up to
16KB is supported, and for a 64K page system 1 page of 64KB is supported.
We avoid > 0 order page allocations for the inline buffers by using
multiple recv sges, one for each page. If the device cannot support
the configured inline data size due to lack of enough recv sges, then
log a warning and reduce the inline size.
Add a new configfs port attribute, called param_inline_data_size,
to allow configuring the size of inline data for a given nvmf port.
The maximum size allowed is still enforced by nvmet-rdma with
NVMET_RDMA_MAX_INLINE_DATA_SIZE, which is now max(16KB, PAGE_SIZE).
And the default size, if not specified via configfs, is still PAGE_SIZE.
This preserves the existing behavior, but allows larger inline sizes
for small page systems. If the configured inline data size exceeds
NVMET_RDMA_MAX_INLINE_DATA_SIZE, a warning is logged and the size is
reduced. If param_inline_data_size is set to 0, then inline data is
disabled for that nvmf port.
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
2018-06-20 22:15:10 +08:00
|
|
|
static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
|
|
|
|
char *page)
|
|
|
|
{
|
|
|
|
struct nvmet_port *port = to_nvmet_port(item);
|
|
|
|
|
|
|
|
return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Set the inline data size advertised for the port.  Must be changed
 * while the port is disabled; the transport validates (and may reduce)
 * the value when the port is enabled.
 */
static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int new_size;

	if (port->enabled) {
		pr_err("Cannot modify inline_data_size while port enabled\n");
		pr_err("Disable the port before modifying\n");
		return -EACCES;
	}

	/* Parse into a local first; commit only on success. */
	if (kstrtoint(page, 0, &new_size)) {
		pr_err("Invalid value '%s' for inline_data_size\n", page);
		return -EINVAL;
	}

	port->inline_data_size = new_size;
	return count;
}

CONFIGFS_ATTR(nvmet_, param_inline_data_size);
|
|
|
|
|
2016-06-22 00:04:20 +08:00
|
|
|
static ssize_t nvmet_addr_trtype_show(struct config_item *item,
|
|
|
|
char *page)
|
|
|
|
{
|
2018-03-21 03:41:34 +08:00
|
|
|
struct nvmet_port *port = to_nvmet_port(item);
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(nvmet_transport_names); i++) {
|
|
|
|
if (port->disc_addr.trtype != nvmet_transport_names[i].type)
|
|
|
|
continue;
|
|
|
|
return sprintf(page, "%s\n", nvmet_transport_names[i].name);
|
2016-06-22 00:04:20 +08:00
|
|
|
}
|
2018-03-21 03:41:34 +08:00
|
|
|
|
|
|
|
return sprintf(page, "\n");
|
2016-06-22 00:04:20 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
|
|
|
|
{
|
|
|
|
port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
|
|
|
|
port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
|
|
|
|
port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Set the transport type of the port by name, resetting the
 * transport-specific address subtype (TSAS) to the defaults for the new
 * transport.  Rejected while the port is enabled.
 */
static ssize_t nvmet_addr_trtype_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int i;

	if (port->enabled) {
		pr_err("Cannot modify address while enabled\n");
		pr_err("Disable the address before modifying\n");
		return -EACCES;
	}

	for (i = 0; i < ARRAY_SIZE(nvmet_transport_names); i++) {
		if (sysfs_streq(page, nvmet_transport_names[i].name))
			break;
	}

	if (i == ARRAY_SIZE(nvmet_transport_names)) {
		pr_err("Invalid value '%s' for trtype\n", page);
		return -EINVAL;
	}

	memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
	port->disc_addr.trtype = nvmet_transport_names[i].type;
	if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
		nvmet_port_init_tsas_rdma(port);
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_trtype);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Namespace structures & file operation functions below
|
|
|
|
*/
|
|
|
|
static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
|
|
|
|
{
|
|
|
|
return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Set the backing device/file path of the namespace.  The first
 * newline-terminated token is stored; the path may only change while
 * the namespace is disabled.
 */
static ssize_t nvmet_ns_device_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	size_t len;
	int ret;

	mutex_lock(&subsys->lock);
	ret = -EBUSY;
	if (ns->enabled)
		goto out_unlock;

	ret = -EINVAL;
	len = strcspn(page, "\n");
	if (!len)
		goto out_unlock;

	/* Replace any previously configured path. */
	kfree(ns->device_path);
	ret = -ENOMEM;
	ns->device_path = kstrndup(page, len, GFP_KERNEL);
	if (!ns->device_path)
		goto out_unlock;

	mutex_unlock(&subsys->lock);
	return count;

out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}

CONFIGFS_ATTR(nvmet_ns_, device_path);
|
|
|
|
|
2018-10-05 05:27:47 +08:00
|
|
|
#ifdef CONFIG_PCI_P2PDMA
|
|
|
|
/* Display the namespace's peer-to-peer memory setting. */
static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);

	/* Formatting is delegated to the PCI P2PDMA helper. */
	return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
}
|
|
|
|
|
|
|
|
/*
 * Configure peer-to-peer memory use for the namespace.  Input parsing
 * is delegated to pci_p2pdma_enable_store(), which may also select a
 * specific P2P provider device.  Only allowed while the namespace is
 * disabled.
 */
static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct pci_dev *p2p_dev = NULL;
	bool use_p2pmem;
	int ret = count;
	int error;

	mutex_lock(&ns->subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
	if (error) {
		ret = error;
		goto out_unlock;
	}

	ns->use_p2pmem = use_p2pmem;
	/* Drop the reference on any previously selected provider. */
	pci_dev_put(ns->p2p_dev);
	ns->p2p_dev = p2p_dev;

out_unlock:
	mutex_unlock(&ns->subsys->lock);

	return ret;
}

CONFIGFS_ATTR(nvmet_ns_, p2pmem);
|
|
|
|
#endif /* CONFIG_PCI_P2PDMA */
|
|
|
|
|
2017-06-07 17:45:33 +08:00
|
|
|
static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
|
|
|
|
{
|
|
|
|
return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Set the namespace UUID from its canonical textual form.  Only allowed
 * while the namespace is disabled.
 */
static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	int ret;

	mutex_lock(&subsys->lock);
	if (ns->enabled)
		ret = -EBUSY;
	else if (uuid_parse(page, &ns->uuid))
		ret = -EINVAL;
	else
		ret = 0;
	mutex_unlock(&subsys->lock);

	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, device_uuid);
|
|
|
|
|
2016-06-22 00:04:20 +08:00
|
|
|
static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
|
|
|
|
{
|
|
|
|
return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Set the 16-byte NGUID of the namespace from hex input.  Byte pairs
 * may optionally be separated by '-' or ':'.  Only allowed while the
 * namespace is disabled.
 */
static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	u8 nguid[16];
	const char *p = page;
	int i;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	for (i = 0; i < 16; i++) {
		/* Each byte needs two hex digits within the write buffer. */
		if (p + 2 > page + count) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (!isxdigit(p[0]) || !isxdigit(p[1])) {
			ret = -EINVAL;
			goto out_unlock;
		}

		nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
		p += 2;

		/* Optional separator between byte pairs. */
		if (*p == '-' || *p == ':')
			p++;
	}

	/* Commit only after the whole string parsed cleanly. */
	memcpy(&ns->nguid, nguid, sizeof(nguid));
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, device_nguid);
|
|
|
|
|
2018-06-01 14:59:25 +08:00
|
|
|
static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
|
|
|
|
{
|
|
|
|
return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Move the namespace to another ANA group.  Group usage counters and
 * the ANA change count are updated under nvmet_ana_sem, after which an
 * ANA change event is sent.
 */
static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	u32 oldgrpid, newgrpid;
	int ret;

	ret = kstrtou32(page, 0, &newgrpid);
	if (ret)
		return ret;

	/* Valid group IDs are 1..NVMET_MAX_ANAGRPS. */
	if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS)
		return -EINVAL;

	down_write(&nvmet_ana_sem);
	oldgrpid = ns->anagrpid;
	/* Bump the new group before dropping the old one. */
	nvmet_ana_group_enabled[newgrpid]++;
	ns->anagrpid = newgrpid;
	nvmet_ana_group_enabled[oldgrpid]--;
	nvmet_ana_chgcnt++;
	up_write(&nvmet_ana_sem);

	nvmet_send_ana_event(ns->subsys, NULL);
	return count;
}

CONFIGFS_ATTR(nvmet_ns_, ana_grpid);
|
|
|
|
|
2016-06-22 00:04:20 +08:00
|
|
|
static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
|
|
|
|
{
|
2016-10-30 16:35:15 +08:00
|
|
|
return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
|
2016-06-22 00:04:20 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Enable or disable the namespace.  Enabling may fail and its error is
 * propagated; disabling returns no error.
 */
static ssize_t nvmet_ns_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	bool enable;
	int ret;

	if (strtobool(page, &enable))
		return -EINVAL;

	if (!enable) {
		nvmet_ns_disable(ns);
		return count;
	}

	ret = nvmet_ns_enable(ns);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, enable);
|
|
|
|
|
2018-06-20 12:01:41 +08:00
|
|
|
static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
|
|
|
|
{
|
|
|
|
return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Select buffered vs direct I/O for the namespace.  Only allowed while
 * the namespace is disabled.
 */
static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	ssize_t ret = count;
	bool val;

	if (strtobool(page, &val))
		return -EINVAL;

	mutex_lock(&ns->subsys->lock);
	if (ns->enabled) {
		pr_err("disable ns before setting buffered_io value.\n");
		ret = -EINVAL;
	} else {
		ns->buffered_io = val;
	}
	mutex_unlock(&ns->subsys->lock);

	return ret;
}

CONFIGFS_ATTR(nvmet_ns_, buffered_io);
|
|
|
|
|
2016-06-22 00:04:20 +08:00
|
|
|
/* Attributes exposed in each namespace directory. */
static struct configfs_attribute *nvmet_ns_attrs[] = {
	&nvmet_ns_attr_device_path,
	&nvmet_ns_attr_device_nguid,
	&nvmet_ns_attr_device_uuid,
	&nvmet_ns_attr_ana_grpid,
	&nvmet_ns_attr_enable,
	&nvmet_ns_attr_buffered_io,
#ifdef CONFIG_PCI_P2PDMA
	&nvmet_ns_attr_p2pmem,
#endif
	NULL,
};
|
|
|
|
|
|
|
|
/* configfs release: last reference to the namespace directory dropped. */
static void nvmet_ns_release(struct config_item *item)
{
	nvmet_ns_free(to_nvmet_ns(item));
}
|
|
|
|
|
|
|
|
/* Item operations for a namespace directory. */
static struct configfs_item_operations nvmet_ns_item_ops = {
	.release = nvmet_ns_release,
};
|
|
|
|
|
2017-10-16 23:18:47 +08:00
|
|
|
/* configfs type for a namespace directory. */
static const struct config_item_type nvmet_ns_type = {
	.ct_item_ops = &nvmet_ns_item_ops,
	.ct_attrs = nvmet_ns_attrs,
	.ct_owner = THIS_MODULE,
};
|
|
|
|
|
|
|
|
/*
 * mkdir under "namespaces": create a namespace whose NSID is the
 * directory name.  The namespace starts disabled.
 */
static struct config_group *nvmet_ns_make(struct config_group *group,
		const char *name)
{
	struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
	struct nvmet_ns *ns;
	int ret;
	u32 nsid;

	ret = kstrtou32(name, 0, &nsid);
	if (ret)
		goto out;

	ret = -EINVAL;
	/* NSID 0 is reserved; NVME_NSID_ALL is the broadcast value. */
	if (nsid == 0 || nsid == NVME_NSID_ALL) {
		pr_err("invalid nsid %#x", nsid);
		goto out;
	}

	ret = -ENOMEM;
	ns = nvmet_ns_alloc(subsys, nsid);
	if (!ns)
		goto out;
	config_group_init_type_name(&ns->group, name, &nvmet_ns_type);

	pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);

	return &ns->group;
out:
	return ERR_PTR(ret);
}
|
|
|
|
|
|
|
|
/* Group operations: mkdir in "namespaces" creates a namespace. */
static struct configfs_group_operations nvmet_namespaces_group_ops = {
	.make_group = nvmet_ns_make,
};
|
|
|
|
|
2017-10-16 23:18:47 +08:00
|
|
|
/* configfs type for the per-subsystem "namespaces" directory. */
static const struct config_item_type nvmet_namespaces_type = {
	.ct_group_ops = &nvmet_namespaces_group_ops,
	.ct_owner = THIS_MODULE,
};
|
|
|
|
|
|
|
|
/*
 * Symlinking a subsystem under a port's "subsystems" directory exports
 * that subsystem on the port.  The first linked subsystem also brings
 * the port transport up.
 */
static int nvmet_port_subsys_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys;
	struct nvmet_subsys_link *link, *p;
	int ret;

	if (target->ci_type != &nvmet_subsys_type) {
		pr_err("can only link subsystems into the subsystems dir.!\n");
		return -EINVAL;
	}
	subsys = to_subsys(target);
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->subsys = subsys;

	down_write(&nvmet_config_sem);
	ret = -EEXIST;
	/* Reject duplicate links of the same subsystem. */
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto out_free_link;
	}

	/* First subsystem on the port: enable the port now. */
	if (list_empty(&port->subsystems)) {
		ret = nvmet_enable_port(port);
		if (ret)
			goto out_free_link;
	}

	list_add_tail(&link->entry, &port->subsystems);
	/* Tell discovery controllers that the port's content changed. */
	nvmet_port_disc_changed(port, subsys);

	up_write(&nvmet_config_sem);
	return 0;

out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}
|
|
|
|
|
2016-11-28 20:22:42 +08:00
|
|
|
/*
 * Removing the symlink stops exporting the subsystem on the port,
 * deletes controllers created through it, and disables the port when
 * the last subsystem is removed.
 */
static void nvmet_port_subsys_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys = to_subsys(target);
	struct nvmet_subsys_link *p;

	down_write(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto found;
	}
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	nvmet_port_del_ctrls(port, subsys);
	nvmet_port_disc_changed(port, subsys);

	if (list_empty(&port->subsystems))
		nvmet_disable_port(port);
	up_write(&nvmet_config_sem);
	kfree(p);
}
|
|
|
|
|
|
|
|
/* Symlink handlers for the per-port "subsystems" directory. */
static struct configfs_item_operations nvmet_port_subsys_item_ops = {
	.allow_link = nvmet_port_subsys_allow_link,
	.drop_link = nvmet_port_subsys_drop_link,
};
|
|
|
|
|
2017-10-16 23:18:47 +08:00
|
|
|
/* configfs type for the per-port "subsystems" directory. */
static const struct config_item_type nvmet_port_subsys_type = {
	.ct_item_ops = &nvmet_port_subsys_item_ops,
	.ct_owner = THIS_MODULE,
};
|
|
|
|
|
|
|
|
/*
 * Symlinking a host under a subsystem's "allowed_hosts" directory
 * grants that host access.  Not permitted while allow_any_host is set.
 */
static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host;
	struct nvmet_host_link *link, *p;
	int ret;

	if (target->ci_type != &nvmet_host_type) {
		pr_err("can only link hosts into the allowed_hosts directory!\n");
		return -EINVAL;
	}

	host = to_host(target);
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->host = host;

	down_write(&nvmet_config_sem);
	ret = -EINVAL;
	if (subsys->allow_any_host) {
		pr_err("can't add hosts when allow_any_host is set!\n");
		goto out_free_link;
	}

	ret = -EEXIST;
	/* Reject duplicate host links (compared by host NQN). */
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto out_free_link;
	}
	list_add_tail(&link->entry, &subsys->hosts);
	/* Access rules changed: notify discovery controllers. */
	nvmet_subsys_disc_changed(subsys, host);

	up_write(&nvmet_config_sem);
	return 0;
out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}
|
|
|
|
|
2016-11-28 20:22:42 +08:00
|
|
|
/*
 * Removing the host symlink revokes that host's access to the
 * subsystem and notifies discovery controllers.
 */
static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host = to_host(target);
	struct nvmet_host_link *p;

	down_write(&nvmet_config_sem);
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto found;
	}
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	nvmet_subsys_disc_changed(subsys, host);

	up_write(&nvmet_config_sem);
	kfree(p);
}
|
|
|
|
|
|
|
|
/* Symlink handlers for the per-subsystem "allowed_hosts" directory. */
static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
	.allow_link = nvmet_allowed_hosts_allow_link,
	.drop_link = nvmet_allowed_hosts_drop_link,
};
|
|
|
|
|
2017-10-16 23:18:47 +08:00
|
|
|
/* configfs type for the per-subsystem "allowed_hosts" directory. */
static const struct config_item_type nvmet_allowed_hosts_type = {
	.ct_item_ops = &nvmet_allowed_hosts_item_ops,
	.ct_owner = THIS_MODULE,
};
|
|
|
|
|
|
|
|
static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
|
|
|
|
char *page)
|
|
|
|
{
|
|
|
|
return snprintf(page, PAGE_SIZE, "%d\n",
|
|
|
|
to_subsys(item)->allow_any_host);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Toggle whether any host may connect to the subsystem.  Mutually
 * exclusive with an explicit allowed_hosts list.
 */
static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	bool allow_any_host;
	int ret = 0;

	if (strtobool(page, &allow_any_host))
		return -EINVAL;

	down_write(&nvmet_config_sem);
	if (allow_any_host && !list_empty(&subsys->hosts)) {
		pr_err("Can't set allow_any_host when explicit hosts are set!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Generate a discovery change event only on an actual transition. */
	if (subsys->allow_any_host != allow_any_host) {
		subsys->allow_any_host = allow_any_host;
		nvmet_subsys_disc_changed(subsys, NULL);
	}

out_unlock:
	up_write(&nvmet_config_sem);
	return ret ? ret : count;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
|
|
|
|
|
2017-07-14 21:36:54 +08:00
|
|
|
static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
|
2017-06-07 17:45:36 +08:00
|
|
|
char *page)
|
|
|
|
{
|
|
|
|
struct nvmet_subsys *subsys = to_subsys(item);
|
|
|
|
|
|
|
|
if (NVME_TERTIARY(subsys->ver))
|
|
|
|
return snprintf(page, PAGE_SIZE, "%d.%d.%d\n",
|
|
|
|
(int)NVME_MAJOR(subsys->ver),
|
|
|
|
(int)NVME_MINOR(subsys->ver),
|
|
|
|
(int)NVME_TERTIARY(subsys->ver));
|
|
|
|
else
|
|
|
|
return snprintf(page, PAGE_SIZE, "%d.%d\n",
|
|
|
|
(int)NVME_MAJOR(subsys->ver),
|
|
|
|
(int)NVME_MINOR(subsys->ver));
|
|
|
|
}
|
|
|
|
|
2017-07-14 21:36:54 +08:00
|
|
|
/*
 * Set the NVMe specification version of the subsystem.  Accepts
 * "major.minor" or "major.minor.tertiary".
 */
static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	int major, minor, tertiary = 0;
	int ret;


	ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
	if (ret != 2 && ret != 3)
		return -EINVAL;

	/*
	 * NOTE(review): negative components are not rejected here and would
	 * be folded into the version word by NVME_VS() — confirm whether
	 * range checking is wanted.
	 */
	down_write(&nvmet_config_sem);
	subsys->ver = NVME_VS(major, minor, tertiary);
	up_write(&nvmet_config_sem);

	return count;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_version);
|
2017-06-07 17:45:36 +08:00
|
|
|
|
2017-07-14 21:36:56 +08:00
|
|
|
static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
|
|
|
|
char *page)
|
|
|
|
{
|
|
|
|
struct nvmet_subsys *subsys = to_subsys(item);
|
|
|
|
|
|
|
|
return snprintf(page, PAGE_SIZE, "%llx\n", subsys->serial);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Set the subsystem serial number from a hex string.
 *
 * Fix: the previous implementation ignored the sscanf() return value,
 * so a write containing no parsable hex number silently "succeeded"
 * while leaving the serial unchanged.  Parse into a local first and
 * reject unparsable input with -EINVAL.
 */
static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	unsigned long long serial;

	if (sscanf(page, "%llx\n", &serial) != 1)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	subsys->serial = serial;
	up_write(&nvmet_config_sem);

	return count;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
|
|
|
|
|
2016-06-22 00:04:20 +08:00
|
|
|
/* Attributes exposed in each subsystem directory. */
static struct configfs_attribute *nvmet_subsys_attrs[] = {
	&nvmet_subsys_attr_attr_allow_any_host,
	&nvmet_subsys_attr_attr_version,
	&nvmet_subsys_attr_attr_serial,
	NULL,
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Subsystem structures & folder operation functions below
|
|
|
|
*/
|
|
|
|
/*
 * Subsystem structures & folder operation functions below
 */
/*
 * configfs release for a subsystem directory: delete any remaining
 * controllers first, then drop the reference on the subsystem.
 */
static void nvmet_subsys_release(struct config_item *item)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	nvmet_subsys_del_ctrls(subsys);
	nvmet_subsys_put(subsys);
}
|
|
|
|
|
|
|
|
/* Item operations for a subsystem directory. */
static struct configfs_item_operations nvmet_subsys_item_ops = {
	.release = nvmet_subsys_release,
};
|
|
|
|
|
2017-10-16 23:18:47 +08:00
|
|
|
/* configfs type for a subsystem directory. */
static const struct config_item_type nvmet_subsys_type = {
	.ct_item_ops = &nvmet_subsys_item_ops,
	.ct_attrs = nvmet_subsys_attrs,
	.ct_owner = THIS_MODULE,
};
|
|
|
|
|
|
|
|
/*
 * mkdir under "subsystems": create a subsystem whose NQN is the
 * directory name.  The discovery subsystem NQN is reserved.  Default
 * child groups for namespaces and allowed hosts are attached here.
 */
static struct config_group *nvmet_subsys_make(struct config_group *group,
		const char *name)
{
	struct nvmet_subsys *subsys;

	if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) {
		pr_err("can't create discovery subsystem through configfs\n");
		return ERR_PTR(-EINVAL);
	}

	subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
	if (IS_ERR(subsys))
		return ERR_CAST(subsys);

	config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);

	config_group_init_type_name(&subsys->namespaces_group,
			"namespaces", &nvmet_namespaces_type);
	configfs_add_default_group(&subsys->namespaces_group, &subsys->group);

	config_group_init_type_name(&subsys->allowed_hosts_group,
			"allowed_hosts", &nvmet_allowed_hosts_type);
	configfs_add_default_group(&subsys->allowed_hosts_group,
			&subsys->group);

	return &subsys->group;
}
|
|
|
|
|
|
|
|
/* Group operations for the top-level "subsystems" directory (mkdir only). */
static struct configfs_group_operations nvmet_subsystems_group_ops = {
	.make_group		= nvmet_subsys_make,
};

static const struct config_item_type nvmet_subsystems_type = {
	.ct_group_ops		= &nvmet_subsystems_group_ops,
	.ct_owner		= THIS_MODULE,
};
|
|
|
static ssize_t nvmet_referral_enable_show(struct config_item *item,
|
|
|
|
char *page)
|
|
|
|
{
|
|
|
|
return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Enable or disable a referral entry.  The parent port is two levels up
 * in the configfs tree (referral dir -> "referrals" -> port dir).
 */
static ssize_t nvmet_referral_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
	struct nvmet_port *port = to_nvmet_port(item);
	bool enable;

	if (strtobool(page, &enable)) {
		pr_err("Invalid value '%s' for enable\n", page);
		return -EINVAL;
	}

	if (enable)
		nvmet_referral_enable(parent, port);
	else
		nvmet_referral_disable(parent, port);

	return count;
}

CONFIGFS_ATTR(nvmet_referral_, enable);
|
|
|
/*
 * Discovery Service subsystem definitions
 */
/* Attributes of a referral entry; order is the file-creation order. */
static struct configfs_attribute *nvmet_referral_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_portid,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_referral_attr_enable,
	NULL,
};
|
|
|
/*
 * configfs ->release for a referral entry: make sure the referral is
 * disabled (removed from the parent port's list) before freeing it.
 */
static void nvmet_referral_release(struct config_item *item)
{
	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
	struct nvmet_port *port = to_nvmet_port(item);

	nvmet_referral_disable(parent, port);
	kfree(port);
}
|
|
|
/* Item operations for a referral directory. */
static struct configfs_item_operations nvmet_referral_item_ops = {
	.release	= nvmet_referral_release,
};

static const struct config_item_type nvmet_referral_type = {
	.ct_owner	= THIS_MODULE,
	.ct_attrs	= nvmet_referral_attrs,
	.ct_item_ops	= &nvmet_referral_item_ops,
};
|
|
|
static struct config_group *nvmet_referral_make(
|
|
|
|
struct config_group *group, const char *name)
|
|
|
|
{
|
|
|
|
struct nvmet_port *port;
|
|
|
|
|
|
|
|
port = kzalloc(sizeof(*port), GFP_KERNEL);
|
|
|
|
if (!port)
|
2016-07-07 16:15:26 +08:00
|
|
|
return ERR_PTR(-ENOMEM);
|
2016-06-22 00:04:20 +08:00
|
|
|
|
|
|
|
INIT_LIST_HEAD(&port->entry);
|
|
|
|
config_group_init_type_name(&port->group, name, &nvmet_referral_type);
|
|
|
|
|
|
|
|
return &port->group;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Group operations for the "referrals" directory (mkdir only). */
static struct configfs_group_operations nvmet_referral_group_ops = {
	.make_group	= nvmet_referral_make,
};

static const struct config_item_type nvmet_referrals_type = {
	.ct_owner	= THIS_MODULE,
	.ct_group_ops	= &nvmet_referral_group_ops,
};
2018-06-01 14:59:25 +08:00
|
|
|
/* Mapping between ANA state enum values and their configfs string names. */
static struct {
	enum nvme_ana_state	state;
	const char		*name;
} nvmet_ana_state_names[] = {
	{ NVME_ANA_OPTIMIZED,		"optimized" },
	{ NVME_ANA_NONOPTIMIZED,	"non-optimized" },
	{ NVME_ANA_INACCESSIBLE,	"inaccessible" },
	{ NVME_ANA_PERSISTENT_LOSS,	"persistent-loss" },
	{ NVME_ANA_CHANGE,		"change" },
};
|
|
|
static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
|
|
|
|
char *page)
|
|
|
|
{
|
|
|
|
struct nvmet_ana_group *grp = to_ana_group(item);
|
|
|
|
enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) {
|
|
|
|
if (state != nvmet_ana_state_names[i].state)
|
|
|
|
continue;
|
|
|
|
return sprintf(page, "%s\n", nvmet_ana_state_names[i].name);
|
|
|
|
}
|
|
|
|
|
|
|
|
return sprintf(page, "\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Set the ANA state of this group by name, bump the change count under
 * nvmet_ana_sem, and notify hosts on the port via an AEN.
 */
static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ana_group *grp = to_ana_group(item);
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++)
		if (sysfs_streq(page, nvmet_ana_state_names[i].name))
			break;

	if (i == ARRAY_SIZE(nvmet_ana_state_names)) {
		pr_err("Invalid value '%s' for ana_state\n", page);
		return -EINVAL;
	}

	down_write(&nvmet_ana_sem);
	grp->port->ana_state[grp->grpid] = nvmet_ana_state_names[i].state;
	nvmet_ana_chgcnt++;
	up_write(&nvmet_ana_sem);

	/* Event is sent after dropping the lock. */
	nvmet_port_send_ana_event(grp->port);
	return count;
}

CONFIGFS_ATTR(nvmet_ana_group_, ana_state);
|
|
|
/* Attributes of an ANA group directory. */
static struct configfs_attribute *nvmet_ana_group_attrs[] = {
	&nvmet_ana_group_attr_ana_state,
	NULL,
};
|
|
|
/*
 * configfs ->release for an ANA group: mark the group inaccessible and
 * drop its enabled count under nvmet_ana_sem, notify hosts, then free it.
 * The default group is embedded in the port and must never be freed here.
 */
static void nvmet_ana_group_release(struct config_item *item)
{
	struct nvmet_ana_group *grp = to_ana_group(item);

	if (grp == &grp->port->ana_default_group)
		return;

	down_write(&nvmet_ana_sem);
	grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE;
	nvmet_ana_group_enabled[grp->grpid]--;
	up_write(&nvmet_ana_sem);

	nvmet_port_send_ana_event(grp->port);
	kfree(grp);
}
|
|
|
/* Item operations for an ANA group directory. */
static struct configfs_item_operations nvmet_ana_group_item_ops = {
	.release		= nvmet_ana_group_release,
};

static const struct config_item_type nvmet_ana_group_type = {
	.ct_item_ops		= &nvmet_ana_group_item_ops,
	.ct_attrs		= nvmet_ana_group_attrs,
	.ct_owner		= THIS_MODULE,
};
|
|
|
static struct config_group *nvmet_ana_groups_make_group(
|
|
|
|
struct config_group *group, const char *name)
|
|
|
|
{
|
|
|
|
struct nvmet_port *port = ana_groups_to_port(&group->cg_item);
|
|
|
|
struct nvmet_ana_group *grp;
|
|
|
|
u32 grpid;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = kstrtou32(name, 0, &grpid);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
ret = -EINVAL;
|
|
|
|
if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
ret = -ENOMEM;
|
|
|
|
grp = kzalloc(sizeof(*grp), GFP_KERNEL);
|
|
|
|
if (!grp)
|
|
|
|
goto out;
|
|
|
|
grp->port = port;
|
|
|
|
grp->grpid = grpid;
|
|
|
|
|
|
|
|
down_write(&nvmet_ana_sem);
|
|
|
|
nvmet_ana_group_enabled[grpid]++;
|
|
|
|
up_write(&nvmet_ana_sem);
|
|
|
|
|
|
|
|
nvmet_port_send_ana_event(grp->port);
|
|
|
|
|
|
|
|
config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type);
|
|
|
|
return &grp->group;
|
|
|
|
out:
|
|
|
|
return ERR_PTR(ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Group operations for the "ana_groups" directory (mkdir only). */
static struct configfs_group_operations nvmet_ana_groups_group_ops = {
	.make_group		= nvmet_ana_groups_make_group,
};

static const struct config_item_type nvmet_ana_groups_type = {
	.ct_group_ops		= &nvmet_ana_groups_group_ops,
	.ct_owner		= THIS_MODULE,
};
2016-06-22 00:04:20 +08:00
|
|
|
/*
 * Ports definitions.
 */
/*
 * configfs ->release for a port directory: wait for in-flight controller
 * teardown work, unlink the port from the global port list, then free the
 * ANA state array and the port itself.  Order matters - nothing may still
 * reference the port when it is freed.
 */
static void nvmet_port_release(struct config_item *item)
{
	struct nvmet_port *port = to_nvmet_port(item);

	/* Let inflight controllers teardown complete */
	flush_scheduled_work();
	list_del(&port->global_entry);

	kfree(port->ana_state);
	kfree(port);
}
|
|
|
/*
 * Attributes of a port directory.  Unlike referrals there is no portid
 * attribute: the port id is taken from the directory name itself.
 */
static struct configfs_attribute *nvmet_port_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_attr_param_inline_data_size,
	NULL,
};
|
|
|
/* Item operations for a port directory. */
static struct configfs_item_operations nvmet_port_item_ops = {
	.release		= nvmet_port_release,
};

static const struct config_item_type nvmet_port_type = {
	.ct_attrs		= nvmet_port_attrs,
	.ct_item_ops		= &nvmet_port_item_ops,
	.ct_owner		= THIS_MODULE,
};
|
|
|
static struct config_group *nvmet_ports_make(struct config_group *group,
|
|
|
|
const char *name)
|
|
|
|
{
|
|
|
|
struct nvmet_port *port;
|
|
|
|
u16 portid;
|
2018-06-01 14:59:25 +08:00
|
|
|
u32 i;
|
2016-06-22 00:04:20 +08:00
|
|
|
|
|
|
|
if (kstrtou16(name, 0, &portid))
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
|
|
|
port = kzalloc(sizeof(*port), GFP_KERNEL);
|
|
|
|
if (!port)
|
2016-07-07 16:15:26 +08:00
|
|
|
return ERR_PTR(-ENOMEM);
|
2016-06-22 00:04:20 +08:00
|
|
|
|
2018-07-19 22:35:20 +08:00
|
|
|
port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1,
|
|
|
|
sizeof(*port->ana_state), GFP_KERNEL);
|
|
|
|
if (!port->ana_state) {
|
|
|
|
kfree(port);
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
}
|
|
|
|
|
2018-06-01 14:59:25 +08:00
|
|
|
for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
|
|
|
|
if (i == NVMET_DEFAULT_ANA_GRPID)
|
|
|
|
port->ana_state[1] = NVME_ANA_OPTIMIZED;
|
|
|
|
else
|
|
|
|
port->ana_state[i] = NVME_ANA_INACCESSIBLE;
|
|
|
|
}
|
2018-07-19 22:35:20 +08:00
|
|
|
|
2018-11-13 05:56:40 +08:00
|
|
|
list_add(&port->global_entry, &nvmet_ports_list);
|
|
|
|
|
2016-06-22 00:04:20 +08:00
|
|
|
INIT_LIST_HEAD(&port->entry);
|
|
|
|
INIT_LIST_HEAD(&port->subsystems);
|
|
|
|
INIT_LIST_HEAD(&port->referrals);
|
nvmet-rdma: support max(16KB, PAGE_SIZE) inline data
The patch enables inline data sizes using up to 4 recv sges, and capping
the size at 16KB or at least 1 page size. So on a 4K page system, up to
16KB is supported, and for a 64K page system 1 page of 64KB is supported.
We avoid > 0 order page allocations for the inline buffers by using
multiple recv sges, one for each page. If the device cannot support
the configured inline data size due to lack of enough recv sges, then
log a warning and reduce the inline size.
Add a new configfs port attribute, called param_inline_data_size,
to allow configuring the size of inline data for a given nvmf port.
The maximum size allowed is still enforced by nvmet-rdma with
NVMET_RDMA_MAX_INLINE_DATA_SIZE, which is now max(16KB, PAGE_SIZE).
And the default size, if not specified via configfs, is still PAGE_SIZE.
This preserves the existing behavior, but allows larger inline sizes
for small page systems. If the configured inline data size exceeds
NVMET_RDMA_MAX_INLINE_DATA_SIZE, a warning is logged and the size is
reduced. If param_inline_data_size is set to 0, then inline data is
disabled for that nvmf port.
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
2018-06-20 22:15:10 +08:00
|
|
|
port->inline_data_size = -1; /* < 0 == let the transport choose */
|
2016-06-22 00:04:20 +08:00
|
|
|
|
|
|
|
port->disc_addr.portid = cpu_to_le16(portid);
|
2018-11-20 17:34:19 +08:00
|
|
|
port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
|
2016-06-22 00:04:20 +08:00
|
|
|
config_group_init_type_name(&port->group, name, &nvmet_port_type);
|
|
|
|
|
|
|
|
config_group_init_type_name(&port->subsys_group,
|
|
|
|
"subsystems", &nvmet_port_subsys_type);
|
|
|
|
configfs_add_default_group(&port->subsys_group, &port->group);
|
|
|
|
|
|
|
|
config_group_init_type_name(&port->referrals_group,
|
|
|
|
"referrals", &nvmet_referrals_type);
|
|
|
|
configfs_add_default_group(&port->referrals_group, &port->group);
|
|
|
|
|
2018-06-01 14:59:25 +08:00
|
|
|
config_group_init_type_name(&port->ana_groups_group,
|
|
|
|
"ana_groups", &nvmet_ana_groups_type);
|
|
|
|
configfs_add_default_group(&port->ana_groups_group, &port->group);
|
|
|
|
|
|
|
|
port->ana_default_group.port = port;
|
|
|
|
port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID;
|
|
|
|
config_group_init_type_name(&port->ana_default_group.group,
|
|
|
|
__stringify(NVMET_DEFAULT_ANA_GRPID),
|
|
|
|
&nvmet_ana_group_type);
|
|
|
|
configfs_add_default_group(&port->ana_default_group.group,
|
|
|
|
&port->ana_groups_group);
|
|
|
|
|
2016-06-22 00:04:20 +08:00
|
|
|
return &port->group;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Group operations for the top-level "ports" directory (mkdir only). */
static struct configfs_group_operations nvmet_ports_group_ops = {
	.make_group		= nvmet_ports_make,
};

static const struct config_item_type nvmet_ports_type = {
	.ct_group_ops		= &nvmet_ports_group_ops,
	.ct_owner		= THIS_MODULE,
};
|
|
|
/* Default groups registered under the "nvmet" configfs root at init time. */
static struct config_group nvmet_subsystems_group;
static struct config_group nvmet_ports_group;
|
|
|
/* configfs ->release for a host entry: just free the allocation. */
static void nvmet_host_release(struct config_item *item)
{
	kfree(to_host(item));
}
|
|
|
/* Item operations for a host directory. */
static struct configfs_item_operations nvmet_host_item_ops = {
	.release		= nvmet_host_release,
};

static const struct config_item_type nvmet_host_type = {
	.ct_item_ops		= &nvmet_host_item_ops,
	.ct_owner		= THIS_MODULE,
};
|
|
|
static struct config_group *nvmet_hosts_make_group(struct config_group *group,
|
|
|
|
const char *name)
|
|
|
|
{
|
|
|
|
struct nvmet_host *host;
|
|
|
|
|
|
|
|
host = kzalloc(sizeof(*host), GFP_KERNEL);
|
|
|
|
if (!host)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
|
|
|
config_group_init_type_name(&host->group, name, &nvmet_host_type);
|
|
|
|
|
|
|
|
return &host->group;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Group operations for the top-level "hosts" directory (mkdir only). */
static struct configfs_group_operations nvmet_hosts_group_ops = {
	.make_group		= nvmet_hosts_make_group,
};

static const struct config_item_type nvmet_hosts_type = {
	.ct_group_ops		= &nvmet_hosts_group_ops,
	.ct_owner		= THIS_MODULE,
};

static struct config_group nvmet_hosts_group;

/* The configfs root itself has no attributes or operations. */
static const struct config_item_type nvmet_root_type = {
	.ct_owner		= THIS_MODULE,
};
|
|
|
/* Root of the "nvmet" configfs hierarchy. */
static struct configfs_subsystem nvmet_configfs_subsystem = {
	.su_group = {
		.cg_item = {
			.ci_namebuf	= "nvmet",
			.ci_type	= &nvmet_root_type,
		},
	},
};
|
|
|
/*
 * Register the "nvmet" configfs subsystem and populate its default
 * "subsystems", "ports" and "hosts" directories.
 *
 * Returns 0 on success or the negative error from
 * configfs_register_subsystem().
 */
int __init nvmet_init_configfs(void)
{
	int ret;

	config_group_init(&nvmet_configfs_subsystem.su_group);
	mutex_init(&nvmet_configfs_subsystem.su_mutex);

	config_group_init_type_name(&nvmet_subsystems_group,
			"subsystems", &nvmet_subsystems_type);
	configfs_add_default_group(&nvmet_subsystems_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_ports_group,
			"ports", &nvmet_ports_type);
	configfs_add_default_group(&nvmet_ports_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_hosts_group,
			"hosts", &nvmet_hosts_type);
	configfs_add_default_group(&nvmet_hosts_group,
			&nvmet_configfs_subsystem.su_group);

	ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
	if (ret)
		pr_err("configfs_register_subsystem: %d\n", ret);

	return ret;
}
|
|
|
/* Module exit: unregister the "nvmet" configfs subsystem. */
void __exit nvmet_exit_configfs(void)
{
	configfs_unregister_subsystem(&nvmet_configfs_subsystem);
}
|