Merge tag 'nvme-5.13-2021-04-15' of git://git.infradead.org/nvme into for-5.13/drivers

Pull NVMe updates from Christoph:

"nvme updates for Linux 5.13

 - refactor the ioctl code
 - fix a segmentation fault during io parsing error in nvmet-tcp
   (Elad Grupi)
 - fix NULL dereference in nvme_ctrl_fast_io_fail_tmo_show/store
   (Gopal Tiwari)
 - properly respect the sgl_threshold flag in nvme-pci (Niklas Cassel)
 - misc cleanups (Niklas Cassel, Amit Engel, Minwoo Im, Colin Ian King)"

* tag 'nvme-5.13-2021-04-15' of git://git.infradead.org/nvme:
  nvme: fix NULL dereference in nvme_ctrl_fast_io_fail_tmo_show/store
  nvme: let namespace probing continue for unsupported features
  nvme: factor out nvme_ns_open and nvme_ns_release helpers
  nvme: move nvme_ns_head_ops to multipath.c
  nvme: factor out a nvme_tryget_ns_head helper
  nvme: move the ioctl code to a separate file
  nvme: don't bother to look up a namespace for controller ioctls
  nvme: simplify block device ioctl handling for the !multipath case
  nvme: simplify the compat ioctl handling
  nvme: factor out a nvme_ns_ioctl helper
  nvme: pass a user pointer to nvme_nvm_ioctl
  nvme: cleanup setting the disk name
  nvme: add a nvme_ns_head_multipath helper
  nvme: remove single trailing whitespace
  nvme-multipath: remove single trailing whitespace
  nvme-pci: remove single trailing whitespace
  nvme-pci: don't simple map sgl when sgls are disabled
  nvmet: fix a spelling mistake "nubmer" -> "number"
  nvmet-fc: simplify nvmet_fc_alloc_hostport
  nvmet-tcp: fix a segmentation fault during io parsing error
Committed by Jens Axboe on 2021-04-15 12:15:12 -06:00 (commit e63c8eb132);
11 changed files with 677 additions and 583 deletions.

drivers/nvme/host/Makefile

@@ -9,7 +9,7 @@ obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
obj-$(CONFIG_NVME_FC) += nvme-fc.o
obj-$(CONFIG_NVME_TCP) += nvme-tcp.o
nvme-core-y := core.o
nvme-core-y := core.o ioctl.o
nvme-core-$(CONFIG_TRACING) += trace.o
nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
nvme-core-$(CONFIG_NVM) += lightnvm.o

drivers/nvme/host/core.c

@@ -112,7 +112,7 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
set_capacity_and_notify(ns->disk, 0);
}
static void nvme_queue_scan(struct nvme_ctrl *ctrl)
void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
/*
* Only new queue scan work when admin and IO queues are both alive
@@ -179,7 +179,7 @@ int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
int ret;
@@ -549,7 +549,12 @@ static void nvme_free_ns_head(struct kref *ref)
kfree(head);
}
static void nvme_put_ns_head(struct nvme_ns_head *head)
bool nvme_tryget_ns_head(struct nvme_ns_head *head)
{
return kref_get_unless_zero(&head->ref);
}
void nvme_put_ns_head(struct nvme_ns_head *head)
{
kref_put(&head->ref, nvme_free_ns_head);
}
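
The new tryget/put pair is the standard kref try-get pattern: kref_get_unless_zero() refuses to resurrect an object whose count has already dropped to zero, so a lookup racing with teardown fails cleanly. A minimal sketch with a hypothetical struct foo, assuming <linux/kref.h> and <linux/slab.h>:

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct foo {
		struct kref ref;
	};

	static void foo_free(struct kref *ref)
	{
		kfree(container_of(ref, struct foo, ref));
	}

	static bool foo_tryget(struct foo *f)
	{
		/* fails once the count has hit zero: never resurrects */
		return kref_get_unless_zero(&f->ref);
	}

	static void foo_put(struct foo *f)
	{
		kref_put(&f->ref, foo_free);	/* calls foo_free at zero */
	}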
@@ -1016,40 +1021,6 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
unsigned len, u32 seed, bool write)
{
struct bio_integrity_payload *bip;
int ret = -ENOMEM;
void *buf;
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
goto out;
ret = -EFAULT;
if (write && copy_from_user(buf, ubuf, len))
goto out_free_meta;
bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
if (IS_ERR(bip)) {
ret = PTR_ERR(bip);
goto out_free_meta;
}
bip->bip_iter.bi_size = len;
bip->bip_iter.bi_sector = seed;
ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
offset_in_page(buf));
if (ret == len)
return buf;
ret = -ENOMEM;
out_free_meta:
kfree(buf);
out:
return ERR_PTR(ret);
}
static u32 nvme_known_admin_effects(u8 opcode)
{
switch (opcode) {
@@ -1138,66 +1109,6 @@ void nvme_execute_passthru_rq(struct request *rq)
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
static int nvme_submit_user_cmd(struct request_queue *q,
struct nvme_command *cmd, void __user *ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
u32 meta_seed, u64 *result, unsigned timeout)
{
bool write = nvme_is_write(cmd);
struct nvme_ns *ns = q->queuedata;
struct block_device *bdev = ns ? ns->disk->part0 : NULL;
struct request *req;
struct bio *bio = NULL;
void *meta = NULL;
int ret;
req = nvme_alloc_request(q, cmd, 0);
if (IS_ERR(req))
return PTR_ERR(req);
if (timeout)
req->timeout = timeout;
nvme_req(req)->flags |= NVME_REQ_USERCMD;
if (ubuffer && bufflen) {
ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
GFP_KERNEL);
if (ret)
goto out;
bio = req->bio;
if (bdev)
bio_set_dev(bio, bdev);
if (bdev && meta_buffer && meta_len) {
meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
meta_seed, write);
if (IS_ERR(meta)) {
ret = PTR_ERR(meta);
goto out_unmap;
}
req->cmd_flags |= REQ_INTEGRITY;
}
}
nvme_execute_passthru_rq(req);
if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
ret = -EINTR;
else
ret = nvme_req(req)->status;
if (result)
*result = le64_to_cpu(nvme_req(req)->result.u64);
if (meta && !ret && !write) {
if (copy_to_user(meta_buffer, meta, meta_len))
ret = -EFAULT;
}
kfree(meta);
out_unmap:
if (bio)
blk_rq_unmap_user(bio);
out:
blk_mq_free_request(req);
return ret;
}
static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
{
struct nvme_ctrl *ctrl = rq->end_io_data;
@@ -1542,182 +1453,6 @@ static void nvme_enable_aen(struct nvme_ctrl *ctrl)
queue_work(nvme_wq, &ctrl->async_event_work);
}
/*
* Convert integer values from ioctl structures to user pointers, silently
* ignoring the upper bits in the compat case to match behaviour of 32-bit
* kernels.
*/
static void __user *nvme_to_user_ptr(uintptr_t ptrval)
{
if (in_compat_syscall())
ptrval = (compat_uptr_t)ptrval;
return (void __user *)ptrval;
}
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
struct nvme_user_io io;
struct nvme_command c;
unsigned length, meta_len;
void __user *metadata;
if (copy_from_user(&io, uio, sizeof(io)))
return -EFAULT;
if (io.flags)
return -EINVAL;
switch (io.opcode) {
case nvme_cmd_write:
case nvme_cmd_read:
case nvme_cmd_compare:
break;
default:
return -EINVAL;
}
length = (io.nblocks + 1) << ns->lba_shift;
if ((io.control & NVME_RW_PRINFO_PRACT) &&
ns->ms == sizeof(struct t10_pi_tuple)) {
/*
* Protection information is stripped/inserted by the
* controller.
*/
if (nvme_to_user_ptr(io.metadata))
return -EINVAL;
meta_len = 0;
metadata = NULL;
} else {
meta_len = (io.nblocks + 1) * ns->ms;
metadata = nvme_to_user_ptr(io.metadata);
}
if (ns->features & NVME_NS_EXT_LBAS) {
length += meta_len;
meta_len = 0;
} else if (meta_len) {
if ((io.metadata & 3) || !io.metadata)
return -EINVAL;
}
memset(&c, 0, sizeof(c));
c.rw.opcode = io.opcode;
c.rw.flags = io.flags;
c.rw.nsid = cpu_to_le32(ns->head->ns_id);
c.rw.slba = cpu_to_le64(io.slba);
c.rw.length = cpu_to_le16(io.nblocks);
c.rw.control = cpu_to_le16(io.control);
c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
c.rw.reftag = cpu_to_le32(io.reftag);
c.rw.apptag = cpu_to_le16(io.apptag);
c.rw.appmask = cpu_to_le16(io.appmask);
return nvme_submit_user_cmd(ns->queue, &c,
nvme_to_user_ptr(io.addr), length,
metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
}
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_passthru_cmd __user *ucmd)
{
struct nvme_passthru_cmd cmd;
struct nvme_command c;
unsigned timeout = 0;
u64 result;
int status;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
return -EFAULT;
if (cmd.flags)
return -EINVAL;
if (ns && cmd.nsid != ns->head->ns_id) {
dev_err(ctrl->device,
"%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
current->comm, cmd.nsid, ns->head->ns_id);
return -EINVAL;
}
memset(&c, 0, sizeof(c));
c.common.opcode = cmd.opcode;
c.common.flags = cmd.flags;
c.common.nsid = cpu_to_le32(cmd.nsid);
c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
c.common.cdw10 = cpu_to_le32(cmd.cdw10);
c.common.cdw11 = cpu_to_le32(cmd.cdw11);
c.common.cdw12 = cpu_to_le32(cmd.cdw12);
c.common.cdw13 = cpu_to_le32(cmd.cdw13);
c.common.cdw14 = cpu_to_le32(cmd.cdw14);
c.common.cdw15 = cpu_to_le32(cmd.cdw15);
if (cmd.timeout_ms)
timeout = msecs_to_jiffies(cmd.timeout_ms);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
nvme_to_user_ptr(cmd.addr), cmd.data_len,
nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
0, &result, timeout);
if (status >= 0) {
if (put_user(result, &ucmd->result))
return -EFAULT;
}
return status;
}
static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_passthru_cmd64 __user *ucmd)
{
struct nvme_passthru_cmd64 cmd;
struct nvme_command c;
unsigned timeout = 0;
int status;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
return -EFAULT;
if (cmd.flags)
return -EINVAL;
if (ns && cmd.nsid != ns->head->ns_id) {
dev_err(ctrl->device,
"%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
current->comm, cmd.nsid, ns->head->ns_id);
return -EINVAL;
}
memset(&c, 0, sizeof(c));
c.common.opcode = cmd.opcode;
c.common.flags = cmd.flags;
c.common.nsid = cpu_to_le32(cmd.nsid);
c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
c.common.cdw10 = cpu_to_le32(cmd.cdw10);
c.common.cdw11 = cpu_to_le32(cmd.cdw11);
c.common.cdw12 = cpu_to_le32(cmd.cdw12);
c.common.cdw13 = cpu_to_le32(cmd.cdw13);
c.common.cdw14 = cpu_to_le32(cmd.cdw14);
c.common.cdw15 = cpu_to_le32(cmd.cdw15);
if (cmd.timeout_ms)
timeout = msecs_to_jiffies(cmd.timeout_ms);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
nvme_to_user_ptr(cmd.addr), cmd.data_len,
nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
0, &cmd.result, timeout);
if (status >= 0) {
if (put_user(cmd.result, &ucmd->result))
return -EFAULT;
}
return status;
}
/*
* Issue ioctl requests on the first available path. Note that unlike normal
* block layer requests we will not retry failed request on another controller.
@@ -1748,136 +1483,12 @@ void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
srcu_read_unlock(&head->srcu, idx);
}
static bool is_ctrl_ioctl(unsigned int cmd)
static int nvme_ns_open(struct nvme_ns *ns)
{
if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
return true;
if (is_sed_ioctl(cmd))
return true;
return false;
}
static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
void __user *argp,
struct nvme_ns_head *head,
int srcu_idx)
{
struct nvme_ctrl *ctrl = ns->ctrl;
int ret;
nvme_get_ctrl(ns->ctrl);
nvme_put_ns_from_disk(head, srcu_idx);
switch (cmd) {
case NVME_IOCTL_ADMIN_CMD:
ret = nvme_user_cmd(ctrl, NULL, argp);
break;
case NVME_IOCTL_ADMIN64_CMD:
ret = nvme_user_cmd64(ctrl, NULL, argp);
break;
default:
ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
break;
}
nvme_put_ctrl(ctrl);
return ret;
}
static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct nvme_ns_head *head = NULL;
void __user *argp = (void __user *)arg;
struct nvme_ns *ns;
int srcu_idx, ret;
ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
if (unlikely(!ns))
return -EWOULDBLOCK;
/*
* Handle ioctls that apply to the controller instead of the namespace
* separately and drop the ns SRCU reference early. This avoids a
* deadlock when deleting namespaces using the passthrough interface.
*/
if (is_ctrl_ioctl(cmd))
return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
switch (cmd) {
case NVME_IOCTL_ID:
force_successful_syscall_return();
ret = ns->head->ns_id;
break;
case NVME_IOCTL_IO_CMD:
ret = nvme_user_cmd(ns->ctrl, ns, argp);
break;
case NVME_IOCTL_SUBMIT_IO:
ret = nvme_submit_io(ns, argp);
break;
case NVME_IOCTL_IO64_CMD:
ret = nvme_user_cmd64(ns->ctrl, ns, argp);
break;
default:
if (ns->ndev)
ret = nvme_nvm_ioctl(ns, cmd, arg);
else
ret = -ENOTTY;
}
nvme_put_ns_from_disk(head, srcu_idx);
return ret;
}
#ifdef CONFIG_COMPAT
struct nvme_user_io32 {
__u8 opcode;
__u8 flags;
__u16 control;
__u16 nblocks;
__u16 rsvd;
__u64 metadata;
__u64 addr;
__u64 slba;
__u32 dsmgmt;
__u32 reftag;
__u16 apptag;
__u16 appmask;
} __attribute__((__packed__));
#define NVME_IOCTL_SUBMIT_IO32 _IOW('N', 0x42, struct nvme_user_io32)
static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
/*
* Corresponds to the difference of NVME_IOCTL_SUBMIT_IO
* between 32 bit programs and 64 bit kernel.
* The cause is that the results of sizeof(struct nvme_user_io),
* which is used to define NVME_IOCTL_SUBMIT_IO,
* are not same between 32 bit compiler and 64 bit compiler.
* NVME_IOCTL_SUBMIT_IO32 is for 64 bit kernel handling
* NVME_IOCTL_SUBMIT_IO issued from 32 bit programs.
* Other IOCTL numbers are same between 32 bit and 64 bit.
* So there is nothing to do regarding to other IOCTL numbers.
*/
if (cmd == NVME_IOCTL_SUBMIT_IO32)
return nvme_ioctl(bdev, mode, NVME_IOCTL_SUBMIT_IO, arg);
return nvme_ioctl(bdev, mode, cmd, arg);
}
#else
#define nvme_compat_ioctl NULL
#endif /* CONFIG_COMPAT */
static int nvme_open(struct block_device *bdev, fmode_t mode)
{
struct nvme_ns *ns = bdev->bd_disk->private_data;
#ifdef CONFIG_NVME_MULTIPATH
/* should never be called due to GENHD_FL_HIDDEN */
if (WARN_ON_ONCE(ns->head->disk))
if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head)))
goto fail;
#endif
if (!kref_get_unless_zero(&ns->kref))
goto fail;
if (!try_module_get(ns->ctrl->ops->module))
@@ -1891,15 +1502,24 @@ fail:
return -ENXIO;
}
static void nvme_release(struct gendisk *disk, fmode_t mode)
static void nvme_ns_release(struct nvme_ns *ns)
{
struct nvme_ns *ns = disk->private_data;
module_put(ns->ctrl->ops->module);
nvme_put_ns(ns);
}
static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
static int nvme_open(struct block_device *bdev, fmode_t mode)
{
return nvme_ns_open(bdev->bd_disk->private_data);
}
static void nvme_release(struct gendisk *disk, fmode_t mode)
{
nvme_ns_release(disk->private_data);
}
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
/* some standard values */
geo->heads = 1 << 6;
@@ -2212,11 +1832,10 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
if (blk_queue_is_zoned(ns->queue)) {
ret = nvme_revalidate_zones(ns);
if (ret && !nvme_first_scan(ns->disk))
return ret;
goto out;
}
#ifdef CONFIG_NVME_MULTIPATH
if (ns->head->disk) {
if (nvme_ns_head_multipath(ns->head)) {
blk_mq_freeze_queue(ns->head->disk->queue);
nvme_update_disk_info(ns->head->disk, ns, id);
blk_stack_limits(&ns->head->disk->queue->limits,
@@ -2224,11 +1843,19 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
blk_queue_update_readahead(ns->head->disk->queue);
blk_mq_unfreeze_queue(ns->head->disk->queue);
}
#endif
return 0;
out_unfreeze:
blk_mq_unfreeze_queue(ns->disk->queue);
out:
/*
* If probing fails due an unsupported feature, hide the block device,
* but still allow other access.
*/
if (ret == -ENODEV) {
ns->disk->flags |= GENHD_FL_HIDDEN;
ret = 0;
}
return ret;
}
@@ -2327,7 +1954,7 @@ static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type
return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}
static const struct pr_ops nvme_pr_ops = {
const struct pr_ops nvme_pr_ops = {
.pr_register = nvme_pr_register,
.pr_reserve = nvme_pr_reserve,
.pr_release = nvme_pr_release,
@@ -2360,7 +1987,6 @@ EXPORT_SYMBOL_GPL(nvme_sec_submit);
static const struct block_device_operations nvme_bdev_ops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
.compat_ioctl = nvme_compat_ioctl,
.open = nvme_open,
.release = nvme_release,
.getgeo = nvme_getgeo,
@@ -2369,31 +1995,25 @@ static const struct block_device_operations nvme_bdev_ops = {
};
#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
struct nvme_ctrl *nvme_find_get_live_ctrl(struct nvme_subsystem *subsys)
{
struct nvme_ns_head *head = bdev->bd_disk->private_data;
struct nvme_ctrl *ctrl;
int ret;
if (!kref_get_unless_zero(&head->ref))
return -ENXIO;
return 0;
ret = mutex_lock_killable(&nvme_subsystems_lock);
if (ret)
return ERR_PTR(ret);
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
if (ctrl->state == NVME_CTRL_LIVE)
goto found;
}
mutex_unlock(&nvme_subsystems_lock);
return ERR_PTR(-EWOULDBLOCK);
found:
nvme_get_ctrl(ctrl);
mutex_unlock(&nvme_subsystems_lock);
return ctrl;
}
static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
{
nvme_put_ns_head(disk->private_data);
}
const struct block_device_operations nvme_ns_head_ops = {
.owner = THIS_MODULE,
.submit_bio = nvme_ns_head_submit_bio,
.open = nvme_ns_head_open,
.release = nvme_ns_head_release,
.ioctl = nvme_ioctl,
.compat_ioctl = nvme_compat_ioctl,
.getgeo = nvme_getgeo,
.report_zones = nvme_report_zones,
.pr_ops = &nvme_pr_ops,
};
#endif /* CONFIG_NVME_MULTIPATH */
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
@@ -3289,7 +2909,7 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl)
ret = nvme_configure_apst(ctrl);
if (ret < 0)
return ret;
ret = nvme_configure_timestamp(ctrl);
if (ret < 0)
return ret;
@@ -3346,65 +2966,6 @@ static int nvme_dev_release(struct inode *inode, struct file *file)
return 0;
}
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
struct nvme_ns *ns;
int ret;
down_read(&ctrl->namespaces_rwsem);
if (list_empty(&ctrl->namespaces)) {
ret = -ENOTTY;
goto out_unlock;
}
ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
dev_warn(ctrl->device,
"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
ret = -EINVAL;
goto out_unlock;
}
dev_warn(ctrl->device,
"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
kref_get(&ns->kref);
up_read(&ctrl->namespaces_rwsem);
ret = nvme_user_cmd(ctrl, ns, argp);
nvme_put_ns(ns);
return ret;
out_unlock:
up_read(&ctrl->namespaces_rwsem);
return ret;
}
static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct nvme_ctrl *ctrl = file->private_data;
void __user *argp = (void __user *)arg;
switch (cmd) {
case NVME_IOCTL_ADMIN_CMD:
return nvme_user_cmd(ctrl, NULL, argp);
case NVME_IOCTL_ADMIN64_CMD:
return nvme_user_cmd64(ctrl, NULL, argp);
case NVME_IOCTL_IO_CMD:
return nvme_dev_user_cmd(ctrl, argp);
case NVME_IOCTL_RESET:
dev_warn(ctrl->device, "resetting controller\n");
return nvme_reset_ctrl_sync(ctrl);
case NVME_IOCTL_SUBSYS_RESET:
return nvme_reset_subsystem(ctrl);
case NVME_IOCTL_RESCAN:
nvme_queue_scan(ctrl);
return 0;
default:
return -ENOTTY;
}
}
static const struct file_operations nvme_dev_fops = {
.owner = THIS_MODULE,
.open = nvme_dev_open,
@@ -3818,6 +3379,8 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
return 0;
if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
return 0;
if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
return 0;
return a->mode;
}
@@ -3840,7 +3403,7 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
lockdep_assert_held(&subsys->lock);
list_for_each_entry(h, &subsys->nsheads, entry) {
if (h->ns_id == nsid && kref_get_unless_zero(&h->ref))
if (h->ns_id == nsid && nvme_tryget_ns_head(h))
return h;
}
@@ -4002,8 +3565,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
struct nvme_ns *ns;
struct gendisk *disk;
struct nvme_id_ns *id;
char disk_name[DISK_NAME_LEN];
int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT;
int node = ctrl->numa_node;
if (nvme_identify_ns(ctrl, nsid, ids, &id))
return;
@@ -4029,7 +3591,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
if (nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED))
goto out_free_queue;
nvme_set_disk_name(disk_name, ns, ctrl, &flags);
disk = alloc_disk_node(0, node);
if (!disk)
@@ -4038,15 +3599,22 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
disk->fops = &nvme_bdev_ops;
disk->private_data = ns;
disk->queue = ns->queue;
disk->flags = flags;
memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
disk->flags = GENHD_FL_EXT_DEVT;
/*
* Without the multipath code enabled, multiple controllers per
* subsystem are visible as devices and thus we cannot use the
* subsystem instance.
*/
if (!nvme_mpath_set_disk_name(ns, disk->disk_name, &disk->flags))
sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
ns->head->instance);
ns->disk = disk;
if (nvme_update_ns_info(ns, id))
goto out_put_disk;
if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
if (nvme_nvm_register(ns, disk_name, node)) {
if (nvme_nvm_register(ns, disk->disk_name, node)) {
dev_warn(ctrl->device, "LightNVM init failure\n");
goto out_put_disk;
}

drivers/nvme/host/ioctl.c (new file, 455 lines)

@@ -0,0 +1,455 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2011-2014, Intel Corporation.
* Copyright (c) 2017-2021 Christoph Hellwig.
*/
#include <linux/ptrace.h> /* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
#include "nvme.h"
/*
* Convert integer values from ioctl structures to user pointers, silently
* ignoring the upper bits in the compat case to match behaviour of 32-bit
* kernels.
*/
static void __user *nvme_to_user_ptr(uintptr_t ptrval)
{
if (in_compat_syscall())
ptrval = (compat_uptr_t)ptrval;
return (void __user *)ptrval;
}
static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
unsigned len, u32 seed, bool write)
{
struct bio_integrity_payload *bip;
int ret = -ENOMEM;
void *buf;
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
goto out;
ret = -EFAULT;
if (write && copy_from_user(buf, ubuf, len))
goto out_free_meta;
bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
if (IS_ERR(bip)) {
ret = PTR_ERR(bip);
goto out_free_meta;
}
bip->bip_iter.bi_size = len;
bip->bip_iter.bi_sector = seed;
ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
offset_in_page(buf));
if (ret == len)
return buf;
ret = -ENOMEM;
out_free_meta:
kfree(buf);
out:
return ERR_PTR(ret);
}
static int nvme_submit_user_cmd(struct request_queue *q,
struct nvme_command *cmd, void __user *ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
u32 meta_seed, u64 *result, unsigned timeout)
{
bool write = nvme_is_write(cmd);
struct nvme_ns *ns = q->queuedata;
struct block_device *bdev = ns ? ns->disk->part0 : NULL;
struct request *req;
struct bio *bio = NULL;
void *meta = NULL;
int ret;
req = nvme_alloc_request(q, cmd, 0);
if (IS_ERR(req))
return PTR_ERR(req);
if (timeout)
req->timeout = timeout;
nvme_req(req)->flags |= NVME_REQ_USERCMD;
if (ubuffer && bufflen) {
ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
GFP_KERNEL);
if (ret)
goto out;
bio = req->bio;
if (bdev)
bio_set_dev(bio, bdev);
if (bdev && meta_buffer && meta_len) {
meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
meta_seed, write);
if (IS_ERR(meta)) {
ret = PTR_ERR(meta);
goto out_unmap;
}
req->cmd_flags |= REQ_INTEGRITY;
}
}
nvme_execute_passthru_rq(req);
if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
ret = -EINTR;
else
ret = nvme_req(req)->status;
if (result)
*result = le64_to_cpu(nvme_req(req)->result.u64);
if (meta && !ret && !write) {
if (copy_to_user(meta_buffer, meta, meta_len))
ret = -EFAULT;
}
kfree(meta);
out_unmap:
if (bio)
blk_rq_unmap_user(bio);
out:
blk_mq_free_request(req);
return ret;
}
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
struct nvme_user_io io;
struct nvme_command c;
unsigned length, meta_len;
void __user *metadata;
if (copy_from_user(&io, uio, sizeof(io)))
return -EFAULT;
if (io.flags)
return -EINVAL;
switch (io.opcode) {
case nvme_cmd_write:
case nvme_cmd_read:
case nvme_cmd_compare:
break;
default:
return -EINVAL;
}
length = (io.nblocks + 1) << ns->lba_shift;
if ((io.control & NVME_RW_PRINFO_PRACT) &&
ns->ms == sizeof(struct t10_pi_tuple)) {
/*
* Protection information is stripped/inserted by the
* controller.
*/
if (nvme_to_user_ptr(io.metadata))
return -EINVAL;
meta_len = 0;
metadata = NULL;
} else {
meta_len = (io.nblocks + 1) * ns->ms;
metadata = nvme_to_user_ptr(io.metadata);
}
if (ns->features & NVME_NS_EXT_LBAS) {
length += meta_len;
meta_len = 0;
} else if (meta_len) {
if ((io.metadata & 3) || !io.metadata)
return -EINVAL;
}
memset(&c, 0, sizeof(c));
c.rw.opcode = io.opcode;
c.rw.flags = io.flags;
c.rw.nsid = cpu_to_le32(ns->head->ns_id);
c.rw.slba = cpu_to_le64(io.slba);
c.rw.length = cpu_to_le16(io.nblocks);
c.rw.control = cpu_to_le16(io.control);
c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
c.rw.reftag = cpu_to_le32(io.reftag);
c.rw.apptag = cpu_to_le16(io.apptag);
c.rw.appmask = cpu_to_le16(io.appmask);
return nvme_submit_user_cmd(ns->queue, &c,
nvme_to_user_ptr(io.addr), length,
metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
}
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_passthru_cmd __user *ucmd)
{
struct nvme_passthru_cmd cmd;
struct nvme_command c;
unsigned timeout = 0;
u64 result;
int status;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
return -EFAULT;
if (cmd.flags)
return -EINVAL;
if (ns && cmd.nsid != ns->head->ns_id) {
dev_err(ctrl->device,
"%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
current->comm, cmd.nsid, ns->head->ns_id);
return -EINVAL;
}
memset(&c, 0, sizeof(c));
c.common.opcode = cmd.opcode;
c.common.flags = cmd.flags;
c.common.nsid = cpu_to_le32(cmd.nsid);
c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
c.common.cdw10 = cpu_to_le32(cmd.cdw10);
c.common.cdw11 = cpu_to_le32(cmd.cdw11);
c.common.cdw12 = cpu_to_le32(cmd.cdw12);
c.common.cdw13 = cpu_to_le32(cmd.cdw13);
c.common.cdw14 = cpu_to_le32(cmd.cdw14);
c.common.cdw15 = cpu_to_le32(cmd.cdw15);
if (cmd.timeout_ms)
timeout = msecs_to_jiffies(cmd.timeout_ms);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
nvme_to_user_ptr(cmd.addr), cmd.data_len,
nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
0, &result, timeout);
if (status >= 0) {
if (put_user(result, &ucmd->result))
return -EFAULT;
}
return status;
}
static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_passthru_cmd64 __user *ucmd)
{
struct nvme_passthru_cmd64 cmd;
struct nvme_command c;
unsigned timeout = 0;
int status;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
return -EFAULT;
if (cmd.flags)
return -EINVAL;
if (ns && cmd.nsid != ns->head->ns_id) {
dev_err(ctrl->device,
"%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
current->comm, cmd.nsid, ns->head->ns_id);
return -EINVAL;
}
memset(&c, 0, sizeof(c));
c.common.opcode = cmd.opcode;
c.common.flags = cmd.flags;
c.common.nsid = cpu_to_le32(cmd.nsid);
c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
c.common.cdw10 = cpu_to_le32(cmd.cdw10);
c.common.cdw11 = cpu_to_le32(cmd.cdw11);
c.common.cdw12 = cpu_to_le32(cmd.cdw12);
c.common.cdw13 = cpu_to_le32(cmd.cdw13);
c.common.cdw14 = cpu_to_le32(cmd.cdw14);
c.common.cdw15 = cpu_to_le32(cmd.cdw15);
if (cmd.timeout_ms)
timeout = msecs_to_jiffies(cmd.timeout_ms);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
nvme_to_user_ptr(cmd.addr), cmd.data_len,
nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
0, &cmd.result, timeout);
if (status >= 0) {
if (put_user(cmd.result, &ucmd->result))
return -EFAULT;
}
return status;
}
static bool is_ctrl_ioctl(unsigned int cmd)
{
if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
return true;
if (is_sed_ioctl(cmd))
return true;
return false;
}
static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
void __user *argp)
{
switch (cmd) {
case NVME_IOCTL_ADMIN_CMD:
return nvme_user_cmd(ctrl, NULL, argp);
case NVME_IOCTL_ADMIN64_CMD:
return nvme_user_cmd64(ctrl, NULL, argp);
default:
return sed_ioctl(ctrl->opal_dev, cmd, argp);
}
}
#ifdef COMPAT_FOR_U64_ALIGNMENT
struct nvme_user_io32 {
__u8 opcode;
__u8 flags;
__u16 control;
__u16 nblocks;
__u16 rsvd;
__u64 metadata;
__u64 addr;
__u64 slba;
__u32 dsmgmt;
__u32 reftag;
__u16 apptag;
__u16 appmask;
} __attribute__((__packed__));
#define NVME_IOCTL_SUBMIT_IO32 _IOW('N', 0x42, struct nvme_user_io32)
#endif /* COMPAT_FOR_U64_ALIGNMENT */
static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
void __user *argp)
{
switch (cmd) {
case NVME_IOCTL_ID:
force_successful_syscall_return();
return ns->head->ns_id;
case NVME_IOCTL_IO_CMD:
return nvme_user_cmd(ns->ctrl, ns, argp);
/*
* struct nvme_user_io can have different padding on some 32-bit ABIs.
* Just accept the compat version as all fields that are used are the
* same size and at the same offset.
*/
#ifdef COMPAT_FOR_U64_ALIGNMENT
case NVME_IOCTL_SUBMIT_IO32:
#endif
case NVME_IOCTL_SUBMIT_IO:
return nvme_submit_io(ns, argp);
case NVME_IOCTL_IO64_CMD:
return nvme_user_cmd64(ns->ctrl, ns, argp);
default:
if (!ns->ndev)
return -ENOTTY;
return nvme_nvm_ioctl(ns, cmd, argp);
}
}
int nvme_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct nvme_ns *ns = bdev->bd_disk->private_data;
void __user *argp = (void __user *)arg;
if (is_ctrl_ioctl(cmd))
return nvme_ctrl_ioctl(ns->ctrl, cmd, argp);
return nvme_ns_ioctl(ns, cmd, argp);
}
#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns_head *head,
unsigned int cmd, void __user *argp)
{
struct nvme_ctrl *ctrl = nvme_find_get_live_ctrl(head->subsys);
int ret;
if (IS_ERR(ctrl))
return PTR_ERR(ctrl);
ret = nvme_ctrl_ioctl(ctrl, cmd, argp);
nvme_put_ctrl(ctrl);
return ret;
}
static int nvme_ns_head_ns_ioctl(struct nvme_ns_head *head,
unsigned int cmd, void __user *argp)
{
int srcu_idx = srcu_read_lock(&head->srcu);
struct nvme_ns *ns = nvme_find_path(head);
int ret = -EWOULDBLOCK;
if (ns)
ret = nvme_ns_ioctl(ns, cmd, argp);
srcu_read_unlock(&head->srcu, srcu_idx);
return ret;
}
int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct nvme_ns_head *head = bdev->bd_disk->private_data;
if (is_ctrl_ioctl(cmd))
return nvme_ns_head_ctrl_ioctl(head, cmd, (void __user *)arg);
return nvme_ns_head_ns_ioctl(head, cmd, (void __user *)arg);
}
#endif /* CONFIG_NVME_MULTIPATH */
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
struct nvme_ns *ns;
int ret;
down_read(&ctrl->namespaces_rwsem);
if (list_empty(&ctrl->namespaces)) {
ret = -ENOTTY;
goto out_unlock;
}
ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
dev_warn(ctrl->device,
"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
ret = -EINVAL;
goto out_unlock;
}
dev_warn(ctrl->device,
"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
kref_get(&ns->kref);
up_read(&ctrl->namespaces_rwsem);
ret = nvme_user_cmd(ctrl, ns, argp);
nvme_put_ns(ns);
return ret;
out_unlock:
up_read(&ctrl->namespaces_rwsem);
return ret;
}
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct nvme_ctrl *ctrl = file->private_data;
void __user *argp = (void __user *)arg;
switch (cmd) {
case NVME_IOCTL_ADMIN_CMD:
return nvme_user_cmd(ctrl, NULL, argp);
case NVME_IOCTL_ADMIN64_CMD:
return nvme_user_cmd64(ctrl, NULL, argp);
case NVME_IOCTL_IO_CMD:
return nvme_dev_user_cmd(ctrl, argp);
case NVME_IOCTL_RESET:
dev_warn(ctrl->device, "resetting controller\n");
return nvme_reset_ctrl_sync(ctrl);
case NVME_IOCTL_SUBSYS_RESET:
return nvme_reset_subsystem(ctrl);
case NVME_IOCTL_RESCAN:
nvme_queue_scan(ctrl);
return 0;
default:
return -ENOTTY;
}
}
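
Whether it enters through the block node (nvme_ioctl), the multipath node (nvme_ns_head_ioctl) or the controller character device (nvme_dev_ioctl), an admin passthrough command ends up in the same nvme_user_cmd() above. A minimal userspace sketch issuing Identify Controller; the device path is illustrative and the command layout follows <linux/nvme_ioctl.h>:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/nvme_ioctl.h>

	int main(void)
	{
		unsigned char id[4096];
		struct nvme_admin_cmd cmd = {
			.opcode   = 0x06,		/* Identify */
			.addr     = (uintptr_t)id,
			.data_len = sizeof(id),
			.cdw10    = 1,			/* CNS 1: Identify Controller */
		};
		int fd = open("/dev/nvme0", O_RDONLY);

		if (fd < 0 || ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) < 0) {
			perror("identify");
			return 1;
		}
		printf("model: %.40s\n", id + 24);	/* MN field: bytes 24-63 */
		close(fd);
		return 0;
	}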

drivers/nvme/host/lightnvm.c

@@ -930,15 +930,15 @@ static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
return ret;
}
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *argp)
{
switch (cmd) {
case NVME_NVM_IOCTL_ADMIN_VIO:
return nvme_nvm_user_vcmd(ns, 1, (void __user *)arg);
return nvme_nvm_user_vcmd(ns, 1, argp);
case NVME_NVM_IOCTL_IO_VIO:
return nvme_nvm_user_vcmd(ns, 0, (void __user *)arg);
return nvme_nvm_user_vcmd(ns, 0, argp);
case NVME_NVM_IOCTL_SUBMIT_VIO:
return nvme_nvm_submit_vio(ns, (void __user *)arg);
return nvme_nvm_submit_vio(ns, argp);
default:
return -ENOTTY;
}

drivers/nvme/host/multipath.c

@@ -50,19 +50,19 @@ void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
* and those that have a single controller and use the controller node
* directly.
*/
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
struct nvme_ctrl *ctrl, int *flags)
bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name, int *flags)
{
if (!multipath) {
sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
} else if (ns->head->disk) {
sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
ctrl->instance, ns->head->instance);
*flags = GENHD_FL_HIDDEN;
} else {
sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
ns->head->instance);
if (!multipath)
return false;
if (!ns->head->disk) {
sprintf(disk_name, "nvme%dn%d", ns->ctrl->subsys->instance,
ns->head->instance);
return true;
}
sprintf(disk_name, "nvme%dc%dn%d", ns->ctrl->subsys->instance,
ns->ctrl->instance, ns->head->instance);
*flags = GENHD_FL_HIDDEN;
return true;
}
void nvme_failover_req(struct request *req)
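
For a hypothetical subsystem instance 0 with two controllers and namespace head instance 1, a multipath build therefore produces:

	/*
	 * hidden per-path nodes:   nvme0c0n1, nvme0c1n1
	 * visible multipath node:  nvme0n1
	 *
	 * With multipath disabled the helper returns false and nvme_alloc_ns()
	 * falls back to nvme<ctrl>n<head>, since the subsystem instance cannot
	 * be used when each controller surfaces its own device.
	 */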
@@ -294,7 +294,7 @@ static bool nvme_available_path(struct nvme_ns_head *head)
return false;
}
blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
static blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
{
struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data;
struct device *dev = disk_to_dev(head->disk);
@@ -334,6 +334,29 @@ blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
return ret;
}
static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
{
if (!nvme_tryget_ns_head(bdev->bd_disk->private_data))
return -ENXIO;
return 0;
}
static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
{
nvme_put_ns_head(disk->private_data);
}
const struct block_device_operations nvme_ns_head_ops = {
.owner = THIS_MODULE,
.submit_bio = nvme_ns_head_submit_bio,
.open = nvme_ns_head_open,
.release = nvme_ns_head_release,
.ioctl = nvme_ns_head_ioctl,
.getgeo = nvme_getgeo,
.report_zones = nvme_report_zones,
.pr_ops = &nvme_pr_ops,
};
static void nvme_requeue_work(struct work_struct *work)
{
struct nvme_ns_head *head =
@@ -674,7 +697,7 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
queue_work(nvme_wq, &ns->ctrl->ana_work);
}
} else {
ns->ana_state = NVME_ANA_OPTIMIZED;
ns->ana_state = NVME_ANA_OPTIMIZED;
nvme_mpath_set_live(ns);
}

drivers/nvme/host/nvme.h

@@ -413,8 +413,8 @@ struct nvme_ns_head {
bool shared;
int instance;
struct nvme_effects_log *effects;
#ifdef CONFIG_NVME_MULTIPATH
struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
struct bio_list requeue_list;
spinlock_t requeue_lock;
struct work_struct requeue_work;
@@ -425,6 +425,11 @@ struct nvme_ns_head {
#endif
};
static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
{
return IS_ENABLED(CONFIG_NVME_MULTIPATH) && head->disk;
}
enum nvme_ns_features {
NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
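
Moving the gendisk pointer out of the #ifdef in struct nvme_ns_head is what allows nvme_ns_head_multipath() to use IS_ENABLED() rather than preprocessor conditionals. A minimal sketch of that pattern with a hypothetical CONFIG_FOO:

	#include <linux/kconfig.h>

	struct foo {
		bool bar;	/* field exists in both configurations */
	};

	static inline bool foo_active(const struct foo *f)
	{
		/* type-checks even when CONFIG_FOO is off; the compiler
		 * folds the dead branch away, unlike an #ifdef block */
		return IS_ENABLED(CONFIG_FOO) && f->bar;
	}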
@@ -642,16 +647,28 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
void nvme_queue_scan(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
void *log, size_t size, u64 offset);
struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
struct nvme_ns_head **head, int *srcu_idx);
void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx);
bool nvme_tryget_ns_head(struct nvme_ns_head *head);
void nvme_put_ns_head(struct nvme_ns_head *head);
struct nvme_ctrl *nvme_find_get_live_ctrl(struct nvme_subsystem *subsys);
int nvme_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;
#ifdef CONFIG_NVME_MULTIPATH
@@ -663,8 +680,7 @@ static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
struct nvme_ctrl *ctrl, int *flags);
bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name, int *flags);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
@@ -676,7 +692,6 @@ void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
blk_qc_t nvme_ns_head_submit_bio(struct bio *bio);
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
@@ -703,16 +718,11 @@ static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
return false;
}
/*
* Without the multipath code enabled, multiple controller per subsystems are
* visible as devices and thus we cannot use the subsystem instance.
*/
static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
struct nvme_ctrl *ctrl, int *flags)
static inline bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name,
int *flags)
{
sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
return false;
}
static inline void nvme_failover_req(struct request *req)
{
}
@@ -800,7 +810,7 @@ static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
extern const struct attribute_group nvme_nvm_attr_group;
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *argp);
#else
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
int node)
@@ -810,7 +820,7 @@ static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
unsigned long arg)
void __user *argp)
{
return -ENOTTY;
}

drivers/nvme/host/pci.c

@@ -854,7 +854,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
return nvme_setup_prp_simple(dev, req,
&cmnd->rw, &bv);
if (iod->nvmeq->qid &&
if (iod->nvmeq->qid && sgl_threshold &&
dev->ctrl.sgls & ((1 << 0) | (1 << 1)))
return nvme_setup_sgl_simple(dev, req,
&cmnd->rw, &bv);
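
This restores the documented meaning of the sgl_threshold module parameter: 0 disables SGLs entirely, but the small-request fast path previously consulted only the controller's SGL support bits. A hedged sketch of the gating after the fix; the function boundary is illustrative, the names follow pci.c:

	static bool use_simple_sgl(struct nvme_dev *dev, struct nvme_queue *nvmeq)
	{
		if (!nvmeq->qid)	/* admin queue always uses PRPs */
			return false;
		if (!sgl_threshold)	/* module parameter: 0 == SGLs disabled */
			return false;
		/* controller advertises SGL support in the SGLS field */
		return dev->ctrl.sgls & ((1 << 0) | (1 << 1));
	}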
@@ -2172,7 +2172,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (nr_io_queues == 0)
return 0;
clear_bit(NVMEQ_ENABLED, &adminq->flags);
if (dev->cmb_use_sqes) {

drivers/nvme/host/zns.c

@@ -96,7 +96,7 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
dev_warn(ns->ctrl->device,
"zone operations:%x not supported for namespace:%u\n",
le16_to_cpu(id->zoc), ns->head->ns_id);
status = -EINVAL;
status = -ENODEV;
goto free_data;
}
@@ -105,7 +105,7 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
dev_warn(ns->ctrl->device,
"invalid zone size:%llu for namespace:%u\n",
ns->zsze, ns->head->ns_id);
status = -EINVAL;
status = -ENODEV;
goto free_data;
}
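
These errno changes pair with the nvme_update_ns_info() hunk in core.c above: -ENODEV from feature probing now means "unsupported here, hide the block device but keep the namespace", while any other error still fails the scan. A condensed sketch of the convention, with a hypothetical feature_supported() helper:

	static bool feature_supported(struct nvme_ns *ns);	/* hypothetical */

	static int nvme_probe_feature(struct nvme_ns *ns)
	{
		if (!feature_supported(ns))
			return -ENODEV;	/* core hides the disk, probe goes on */
		return 0;		/* other errnos would fail the scan */
	}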

drivers/nvme/target/configfs.c

@@ -1150,7 +1150,7 @@ static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
return -EINVAL;
if (len > NVMET_MN_MAX_SIZE) {
pr_err("Model nubmer size can not exceed %d Bytes\n",
pr_err("Model number size can not exceed %d Bytes\n",
NVMET_MN_MAX_SIZE);
return -EINVAL;
}

drivers/nvme/target/fc.c

@@ -1020,61 +1020,76 @@ nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport)
nvmet_fc_hostport_put(hostport);
}
static struct nvmet_fc_hostport *
nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
struct nvmet_fc_hostport *host;
lockdep_assert_held(&tgtport->lock);
list_for_each_entry(host, &tgtport->host_list, host_list) {
if (host->hosthandle == hosthandle && !host->invalid) {
if (nvmet_fc_hostport_get(host))
return (host);
}
}
return NULL;
}
static struct nvmet_fc_hostport *
nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
struct nvmet_fc_hostport *newhost, *host, *match = NULL;
struct nvmet_fc_hostport *newhost, *match = NULL;
unsigned long flags;
/* if LLDD not implemented, leave as NULL */
if (!hosthandle)
return NULL;
/* take reference for what will be the newly allocated hostport */
/*
* take reference for what will be the newly allocated hostport if
* we end up using a new allocation
*/
if (!nvmet_fc_tgtport_get(tgtport))
return ERR_PTR(-EINVAL);
newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
if (!newhost) {
spin_lock_irqsave(&tgtport->lock, flags);
list_for_each_entry(host, &tgtport->host_list, host_list) {
if (host->hosthandle == hosthandle && !host->invalid) {
if (nvmet_fc_hostport_get(host)) {
match = host;
break;
}
}
}
spin_unlock_irqrestore(&tgtport->lock, flags);
/* no allocation - release reference */
nvmet_fc_tgtport_put(tgtport);
return (match) ? match : ERR_PTR(-ENOMEM);
}
newhost->tgtport = tgtport;
newhost->hosthandle = hosthandle;
INIT_LIST_HEAD(&newhost->host_list);
kref_init(&newhost->ref);
spin_lock_irqsave(&tgtport->lock, flags);
list_for_each_entry(host, &tgtport->host_list, host_list) {
if (host->hosthandle == hosthandle && !host->invalid) {
if (nvmet_fc_hostport_get(host)) {
match = host;
break;
}
}
}
if (match) {
kfree(newhost);
newhost = NULL;
/* releasing allocation - release reference */
nvmet_fc_tgtport_put(tgtport);
} else
list_add_tail(&newhost->host_list, &tgtport->host_list);
match = nvmet_fc_match_hostport(tgtport, hosthandle);
spin_unlock_irqrestore(&tgtport->lock, flags);
return (match) ? match : newhost;
if (match) {
/* no new allocation - release reference */
nvmet_fc_tgtport_put(tgtport);
return match;
}
newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
if (!newhost) {
/* no new allocation - release reference */
nvmet_fc_tgtport_put(tgtport);
return ERR_PTR(-ENOMEM);
}
spin_lock_irqsave(&tgtport->lock, flags);
match = nvmet_fc_match_hostport(tgtport, hosthandle);
if (match) {
/* new allocation not needed */
kfree(newhost);
newhost = match;
/* no new allocation - release reference */
nvmet_fc_tgtport_put(tgtport);
} else {
newhost->tgtport = tgtport;
newhost->hosthandle = hosthandle;
INIT_LIST_HEAD(&newhost->host_list);
kref_init(&newhost->ref);
list_add_tail(&newhost->host_list, &tgtport->host_list);
}
spin_unlock_irqrestore(&tgtport->lock, flags);
return newhost;
}
static void
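
The rework is the classic find-or-create shape: look up under the lock, allocate with GFP_KERNEL only after dropping it, then repeat the lookup before publishing in case another thread inserted meanwhile. A minimal sketch with hypothetical types; obj_match() is assumed to take a reference on success:

	struct obj {
		struct list_head entry;
		struct kref ref;
		void *key;
	};

	static struct obj *obj_match(struct list_head *list, void *key);

	static struct obj *obj_find_or_create(struct list_head *list,
					      spinlock_t *lock, void *key)
	{
		struct obj *new, *match;
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		match = obj_match(list, key);
		spin_unlock_irqrestore(lock, flags);
		if (match)
			return match;

		new = kzalloc(sizeof(*new), GFP_KERNEL); /* may sleep: no lock */
		if (!new)
			return ERR_PTR(-ENOMEM);

		spin_lock_irqsave(lock, flags);
		match = obj_match(list, key);	/* re-check: raced insertion? */
		if (match) {
			kfree(new);
			new = match;
		} else {
			new->key = key;
			kref_init(&new->ref);
			list_add_tail(&new->entry, list);
		}
		spin_unlock_irqrestore(lock, flags);
		return new;
	}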

drivers/nvme/target/tcp.c

@@ -537,11 +537,36 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
struct nvmet_tcp_cmd *cmd =
container_of(req, struct nvmet_tcp_cmd, req);
struct nvmet_tcp_queue *queue = cmd->queue;
struct nvme_sgl_desc *sgl;
u32 len;
if (unlikely(cmd == queue->cmd)) {
sgl = &cmd->req.cmd->common.dptr.sgl;
len = le32_to_cpu(sgl->length);
/*
* Wait for inline data before processing the response.
* Avoid using helpers, this might happen before
* nvmet_req_init is completed.
*/
if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
len && len < cmd->req.port->inline_data_size &&
nvme_is_write(cmd->req.cmd))
return;
}
llist_add(&cmd->lentry, &queue->resp_list);
queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
}
static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
{
if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
nvmet_tcp_queue_response(&cmd->req);
else
cmd->req.execute(&cmd->req);
}
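
nvmet_tcp_execute_request() centralizes the decision the hunks below defer to: only once all data for a command has been received is it either executed or, if nvmet_req_init() had already failed, its prepared error response queued. A stripped-down sketch of that dispatch with hypothetical types:

	struct cmd {
		bool init_failed;		/* set when request init fails */
		void (*execute)(struct cmd *);
		void (*queue_response)(struct cmd *);
	};

	static void cmd_data_complete(struct cmd *c)
	{
		if (c->init_failed)
			c->queue_response(c);	/* error status set at init */
		else
			c->execute(c);
	}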
static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
{
u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
@@ -973,7 +998,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
le32_to_cpu(req->cmd->common.dptr.sgl.length));
nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
return -EAGAIN;
return 0;
}
ret = nvmet_tcp_map_data(queue->cmd);
@@ -1116,10 +1141,8 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
}
nvmet_tcp_unmap_pdu_iovec(cmd);
if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
cmd->rbytes_done == cmd->req.transfer_len) {
cmd->req.execute(&cmd->req);
}
if (cmd->rbytes_done == cmd->req.transfer_len)
nvmet_tcp_execute_request(cmd);
nvmet_prepare_receive_pdu(queue);
return 0;
@@ -1156,9 +1179,9 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
goto out;
}
if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
cmd->rbytes_done == cmd->req.transfer_len)
cmd->req.execute(&cmd->req);
if (cmd->rbytes_done == cmd->req.transfer_len)
nvmet_tcp_execute_request(cmd);
ret = 0;
out:
nvmet_prepare_receive_pdu(queue);