Merge branch 'nvme-5.2' of git://git.infradead.org/nvme into for-5.2/block

Pull NVMe changes from Christoph.

* 'nvme-5.2' of git://git.infradead.org/nvme:
  nvme: set 0 capacity if namespace block size exceeds PAGE_SIZE
  nvme-rdma: fix typo in struct comment
  nvme-loop: kill timeout handler
  nvme-tcp: rename function to have nvme_tcp prefix
  nvme-rdma: fix a NULL deref when an admin connect times out
  nvme-tcp: fix a NULL deref when an admin connect times out
  nvmet-tcp: don't fail maxr2t greater than 1
  nvmet-file: clamp-down file namespace lba_shift
  nvmet: include <linux/scatterlist.h>
  nvmet: return a specified error if subsys_alloc fails
  nvmet: rename nvme_completion instances from rsp to cqe
  nvmet-rdma: remove p2p_client initialization from fast-path
Jens Axboe 2019-04-26 10:25:19 -06:00
commit 41d7f2ed84
15 changed files with 77 additions and 84 deletions

@@ -1591,6 +1591,10 @@ static void nvme_update_disk_info(struct gendisk *disk,
 	sector_t capacity = le64_to_cpu(id->nsze) << (ns->lba_shift - 9);
 	unsigned short bs = 1 << ns->lba_shift;
 
+	if (ns->lba_shift > PAGE_SHIFT) {
+		/* unsupported block size, set capacity to 0 later */
+		bs = (1 << 9);
+	}
 	blk_mq_freeze_queue(disk->queue);
 	blk_integrity_unregister(disk);
 
@@ -1601,7 +1605,8 @@ static void nvme_update_disk_info(struct gendisk *disk,
 	if (ns->ms && !ns->ext &&
 	    (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
 		nvme_init_integrity(disk, ns->ms, ns->pi_type);
-	if (ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk))
+	if ((ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) ||
+	    ns->lba_shift > PAGE_SHIFT)
 		capacity = 0;
 
 	set_capacity(disk, capacity);

@@ -914,8 +914,9 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
 {
 	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
 	nvme_rdma_stop_queue(&ctrl->queues[0]);
-	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_cancel_request,
-			&ctrl->ctrl);
+	if (ctrl->ctrl.admin_tagset)
+		blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
+			nvme_cancel_request, &ctrl->ctrl);
 	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 	nvme_rdma_destroy_admin_queue(ctrl, remove);
 }
@@ -926,8 +927,9 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
 	if (ctrl->ctrl.queue_count > 1) {
 		nvme_stop_queues(&ctrl->ctrl);
 		nvme_rdma_stop_io_queues(ctrl);
-		blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_cancel_request,
-				&ctrl->ctrl);
+		if (ctrl->ctrl.tagset)
+			blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
+				nvme_cancel_request, &ctrl->ctrl);
 		if (remove)
 			nvme_start_queues(&ctrl->ctrl);
 		nvme_rdma_destroy_io_queues(ctrl, remove);
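
The two hunks above fix a NULL dereference hit when an admin connect times out: error recovery can tear the queues down before the admin or I/O tag set has ever been allocated, and blk_mq_tagset_busy_iter() would then run on a NULL tag set. The fix tests the generic ctrl->ctrl.*tagset pointers (only set once allocation succeeded) before iterating; the nvme-tcp hunks further down apply the same guard. A minimal userspace sketch of the pattern, with illustrative names rather than the kernel API:

/*
 * Teardown that may run after a failed or timed-out setup has to tolerate
 * members that were never allocated. Types and names here are made up.
 */
#include <stdio.h>

struct tag_set { int nr_tags; };

struct ctrl {
	struct tag_set *admin_tagset;	/* NULL until the admin queue is set up */
};

static void cancel_all_requests(struct tag_set *set)
{
	printf("cancelling %d tags\n", set->nr_tags);
}

static void teardown_admin_queue(struct ctrl *c)
{
	/* a timed-out connect may not have allocated the tag set yet */
	if (c->admin_tagset)
		cancel_all_requests(c->admin_tagset);
}

int main(void)
{
	struct tag_set set = { .nr_tags = 32 };
	struct ctrl early_failure = { .admin_tagset = NULL };
	struct ctrl healthy = { .admin_tagset = &set };

	teardown_admin_queue(&early_failure);	/* no-op instead of a NULL deref */
	teardown_admin_queue(&healthy);
	return 0;
}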

@@ -473,7 +473,6 @@ static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
 	}
 
 	return 0;
-
 }
 
 static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
@@ -634,7 +633,6 @@ static inline void nvme_tcp_end_request(struct request *rq, u16 status)
 	nvme_end_request(rq, cpu_to_le16(status << 1), res);
 }
 
-
 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
 		unsigned int *offset, size_t *len)
 {
@@ -1535,7 +1533,7 @@ out_free_queue:
 	return ret;
 }
 
-static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
+static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
 {
 	int i, ret;
 
@@ -1565,7 +1563,7 @@ static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
 	return nr_io_queues;
 }
 
-static int nvme_alloc_io_queues(struct nvme_ctrl *ctrl)
+static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
 {
 	unsigned int nr_io_queues;
 	int ret;
@@ -1582,7 +1580,7 @@ static int nvme_alloc_io_queues(struct nvme_ctrl *ctrl)
 	dev_info(ctrl->device,
 		"creating %d I/O queues.\n", nr_io_queues);
 
-	return nvme_tcp_alloc_io_queues(ctrl);
+	return __nvme_tcp_alloc_io_queues(ctrl);
 }
 
 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
@@ -1599,7 +1597,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
 {
 	int ret;
 
-	ret = nvme_alloc_io_queues(ctrl);
+	ret = nvme_tcp_alloc_io_queues(ctrl);
 	if (ret)
 		return ret;
 
@@ -1710,7 +1708,9 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
 {
 	blk_mq_quiesce_queue(ctrl->admin_q);
 	nvme_tcp_stop_queue(ctrl, 0);
-	blk_mq_tagset_busy_iter(ctrl->admin_tagset, nvme_cancel_request, ctrl);
+	if (ctrl->admin_tagset)
+		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
+			nvme_cancel_request, ctrl);
 	blk_mq_unquiesce_queue(ctrl->admin_q);
 	nvme_tcp_destroy_admin_queue(ctrl, remove);
 }
@@ -1722,7 +1722,9 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
 		return;
 	nvme_stop_queues(ctrl);
 	nvme_tcp_stop_io_queues(ctrl);
-	blk_mq_tagset_busy_iter(ctrl->tagset, nvme_cancel_request, ctrl);
+	if (ctrl->tagset)
+		blk_mq_tagset_busy_iter(ctrl->tagset,
+			nvme_cancel_request, ctrl);
 	if (remove)
 		nvme_start_queues(ctrl);
 	nvme_tcp_destroy_io_queues(ctrl, remove);

@@ -3,6 +3,7 @@ config NVME_TARGET
 	tristate "NVMe Target support"
 	depends on BLOCK
 	depends on CONFIGFS_FS
+	select SGL_ALLOC
 	help
 	  This enabled target side support for the NVMe protocol, that is
 	  it allows the Linux kernel to implement NVMe subsystems and

@@ -898,8 +898,8 @@ static struct config_group *nvmet_subsys_make(struct config_group *group,
 	}
 
 	subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
-	if (!subsys)
-		return ERR_PTR(-ENOMEM);
+	if (IS_ERR(subsys))
+		return ERR_CAST(subsys);
 
 	config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);

@@ -8,6 +8,7 @@
 #include <linux/random.h>
 #include <linux/rculist.h>
 #include <linux/pci-p2pdma.h>
+#include <linux/scatterlist.h>
 
 #include "nvmet.h"
 
@@ -647,7 +648,7 @@ static void nvmet_update_sq_head(struct nvmet_req *req)
 	} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
 			old_sqhd);
 	}
-	req->rsp->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
+	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
 }
 
 static void nvmet_set_error(struct nvmet_req *req, u16 status)
@@ -656,7 +657,7 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)
 	struct nvme_error_slot *new_error_slot;
 	unsigned long flags;
 
-	req->rsp->status = cpu_to_le16(status << 1);
+	req->cqe->status = cpu_to_le16(status << 1);
 
 	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
 		return;
@@ -676,15 +677,15 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)
 	spin_unlock_irqrestore(&ctrl->error_lock, flags);
 
 	/* set the more bit for this request */
-	req->rsp->status |= cpu_to_le16(1 << 14);
+	req->cqe->status |= cpu_to_le16(1 << 14);
 }
 
 static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
 {
 	if (!req->sq->sqhd_disabled)
 		nvmet_update_sq_head(req);
-	req->rsp->sq_id = cpu_to_le16(req->sq->qid);
-	req->rsp->command_id = req->cmd->common.command_id;
+	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
+	req->cqe->command_id = req->cmd->common.command_id;
 
 	if (unlikely(status))
 		nvmet_set_error(req, status);
@@ -841,8 +842,8 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 	req->sg = NULL;
 	req->sg_cnt = 0;
 	req->transfer_len = 0;
-	req->rsp->status = 0;
-	req->rsp->sq_head = 0;
+	req->cqe->status = 0;
+	req->cqe->sq_head = 0;
 	req->ns = NULL;
 	req->error_loc = NVMET_NO_ERROR_LOC;
 	req->error_slba = 0;
@@ -1069,7 +1070,7 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
 	if (!subsys) {
 		pr_warn("connect request for invalid subsystem %s!\n",
 			subsysnqn);
-		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
+		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
 		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 	}
 
@@ -1090,7 +1091,7 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
 
 	pr_warn("could not find controller %d for subsys %s / host %s\n",
 		cntlid, subsysnqn, hostnqn);
-	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
+	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 
 out:
@@ -1188,7 +1189,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	if (!subsys) {
 		pr_warn("connect request for invalid subsystem %s!\n",
 			subsysnqn);
-		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
+		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
 		goto out;
 	}
 
@@ -1197,7 +1198,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	if (!nvmet_host_allowed(subsys, hostnqn)) {
 		pr_info("connect by host %s for subsystem %s not allowed\n",
 			hostnqn, subsysnqn);
-		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
+		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
 		up_read(&nvmet_config_sem);
 		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
 		goto out_put_subsystem;
@@ -1367,7 +1368,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
 
 	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
 	if (!subsys)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
 	/* generate a random serial number as our controllers are ephemeral: */
@@ -1383,14 +1384,14 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
 	default:
 		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
 		kfree(subsys);
-		return NULL;
+		return ERR_PTR(-EINVAL);
 	}
 	subsys->type = type;
 	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
 			GFP_KERNEL);
 	if (!subsys->subsysnqn) {
 		kfree(subsys);
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	kref_init(&subsys->ref);

@@ -372,8 +372,8 @@ int __init nvmet_init_discovery(void)
 {
 	nvmet_disc_subsys =
 		nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
-	if (!nvmet_disc_subsys)
-		return -ENOMEM;
+	if (IS_ERR(nvmet_disc_subsys))
+		return PTR_ERR(nvmet_disc_subsys);
 
 	return 0;
 }
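
The nvmet_subsys_alloc() change above, together with the nvmet_subsys_make() and nvmet_init_discovery() callers, moves from returning NULL to the kernel's ERR_PTR() convention so callers can distinguish -ENOMEM from -EINVAL and propagate the encoded error with PTR_ERR() or ERR_CAST(). The real macros live in include/linux/err.h; the following is a self-contained userspace sketch of the idiom, not the kernel implementation:

/*
 * ERR_PTR()/IS_ERR() encode a negative errno in the top of the pointer
 * range, so one return value can carry either a valid pointer or an error.
 */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)       { return (void *)error; }
static inline long PTR_ERR(const void *ptr)   { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct subsys { const char *nqn; };

static struct subsys *subsys_alloc(const char *nqn, int valid_type)
{
	struct subsys *s;

	if (!valid_type)
		return ERR_PTR(-EINVAL);	/* caller sees the real reason */
	s = malloc(sizeof(*s));
	if (!s)
		return ERR_PTR(-ENOMEM);
	s->nqn = nqn;
	return s;
}

int main(void)
{
	struct subsys *s = subsys_alloc("nqn.test", 0);

	if (IS_ERR(s))
		printf("alloc failed: %ld\n", PTR_ERR(s));	/* -22 == -EINVAL */

	s = subsys_alloc("nqn.test", 1);
	if (!IS_ERR(s)) {
		printf("allocated %s\n", s->nqn);
		free(s);
	}
	return 0;
}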

@@ -72,7 +72,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
 			offsetof(struct nvmf_property_get_command, attrib);
 	}
 
-	req->rsp->result.u64 = cpu_to_le64(val);
+	req->cqe->result.u64 = cpu_to_le64(val);
 	nvmet_req_complete(req, status);
 }
 
@@ -124,7 +124,7 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
 
 	if (c->cattr & NVME_CONNECT_DISABLE_SQFLOW) {
 		req->sq->sqhd_disabled = true;
-		req->rsp->sq_head = cpu_to_le16(0xffff);
+		req->cqe->sq_head = cpu_to_le16(0xffff);
 	}
 
 	if (ctrl->ops->install_queue) {
@@ -158,7 +158,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 		goto out;
 
 	/* zero out initial completion result, assign values as needed */
-	req->rsp->result.u32 = 0;
+	req->cqe->result.u32 = 0;
 
 	if (c->recfmt != 0) {
 		pr_warn("invalid connect version (%d).\n",
@@ -172,7 +172,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 		pr_warn("connect attempt for invalid controller ID %#x\n",
 			d->cntlid);
 		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
-		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
+		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 		goto out;
 	}
 
@@ -195,7 +195,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 
 	pr_info("creating controller %d for subsystem %s for NQN %s.\n",
 		ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn);
-	req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
+	req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
 
 out:
 	kfree(d);
@@ -222,7 +222,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 		goto out;
 
 	/* zero out initial completion result, assign values as needed */
-	req->rsp->result.u32 = 0;
+	req->cqe->result.u32 = 0;
 
 	if (c->recfmt != 0) {
 		pr_warn("invalid connect version (%d).\n",
@@ -240,14 +240,14 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 	if (unlikely(qid > ctrl->subsys->max_qid)) {
 		pr_warn("invalid queue id (%d)\n", qid);
 		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
-		req->rsp->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
+		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
 		goto out_ctrl_put;
 	}
 
 	status = nvmet_install_queue(ctrl, req);
 	if (status) {
 		/* pass back cntlid that had the issue of installing queue */
-		req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
+		req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
 		goto out_ctrl_put;
 	}
 

@@ -2184,7 +2184,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 	}
 
 	fod->req.cmd = &fod->cmdiubuf.sqe;
-	fod->req.rsp = &fod->rspiubuf.cqe;
+	fod->req.cqe = &fod->rspiubuf.cqe;
 	fod->req.port = tgtport->pe->port;
 
 	/* clear any response payload */

@@ -49,7 +49,12 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
 		goto err;
 
 	ns->size = stat.size;
-	ns->blksize_shift = file_inode(ns->file)->i_blkbits;
+	/*
+	 * i_blkbits can be greater than the universally accepted upper bound,
+	 * so make sure we export a sane namespace lba_shift.
+	 */
+	ns->blksize_shift = min_t(u8,
+			file_inode(ns->file)->i_blkbits, 12);
 
 	ns->bvec_cache = kmem_cache_create("nvmet-bvec",
 			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
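
The clamp above caps a file-backed namespace's blksize_shift at 12 (4 KiB): as the new comment says, the backing inode's i_blkbits can exceed the universally accepted upper bound for an LBA size, and the host-side change at the top of this commit would zero the capacity of anything above its page shift anyway. A trivial userspace stand-in for the min_t() expression:

/* Userspace stand-in for min_t(u8, i_blkbits, 12); the 12 mirrors the diff. */
#include <stdio.h>
#include <stdint.h>

static uint8_t clamp_lba_shift(uint8_t i_blkbits)
{
	return i_blkbits < 12 ? i_blkbits : 12;
}

int main(void)
{
	printf("4 KiB filesystem blocks  -> lba_shift %u\n", clamp_lba_shift(12));
	printf("64 KiB filesystem blocks -> lba_shift %u\n", clamp_lba_shift(16));
	return 0;
}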

@@ -18,7 +18,7 @@
 struct nvme_loop_iod {
 	struct nvme_request	nvme_req;
 	struct nvme_command	cmd;
-	struct nvme_completion	rsp;
+	struct nvme_completion	cqe;
 	struct nvmet_req	req;
 	struct nvme_loop_queue	*queue;
 	struct work_struct	work;
@@ -94,7 +94,7 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
 {
 	struct nvme_loop_queue *queue =
 		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
-	struct nvme_completion *cqe = req->rsp;
+	struct nvme_completion *cqe = req->cqe;
 
 	/*
 	 * AEN requests are special as they don't time out and can
@@ -129,20 +129,6 @@ static void nvme_loop_execute_work(struct work_struct *work)
 	nvmet_req_execute(&iod->req);
 }
 
-static enum blk_eh_timer_return
-nvme_loop_timeout(struct request *rq, bool reserved)
-{
-	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);
-
-	/* queue error recovery */
-	nvme_reset_ctrl(&iod->queue->ctrl->ctrl);
-
-	/* fail with DNR on admin cmd timeout */
-	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
-
-	return BLK_EH_DONE;
-}
-
 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		const struct blk_mq_queue_data *bd)
 {
@@ -207,7 +193,7 @@ static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
 		struct nvme_loop_iod *iod, unsigned int queue_idx)
 {
 	iod->req.cmd = &iod->cmd;
-	iod->req.rsp = &iod->rsp;
+	iod->req.cqe = &iod->cqe;
 	iod->queue = &ctrl->queues[queue_idx];
 	INIT_WORK(&iod->work, nvme_loop_execute_work);
 	return 0;
@@ -253,7 +239,6 @@ static const struct blk_mq_ops nvme_loop_mq_ops = {
 	.complete	= nvme_loop_complete_rq,
 	.init_request	= nvme_loop_init_request,
 	.init_hctx	= nvme_loop_init_hctx,
-	.timeout	= nvme_loop_timeout,
 };
 
 static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
@@ -261,7 +246,6 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
 	.complete	= nvme_loop_complete_rq,
 	.init_request	= nvme_loop_init_request,
 	.init_hctx	= nvme_loop_init_admin_hctx,
-	.timeout	= nvme_loop_timeout,
 };
 
 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)

@@ -284,7 +284,7 @@ struct nvmet_fabrics_ops {
 
 struct nvmet_req {
 	struct nvme_command	*cmd;
-	struct nvme_completion	*rsp;
+	struct nvme_completion	*cqe;
 	struct nvmet_sq		*sq;
 	struct nvmet_cq		*cq;
 	struct nvmet_ns		*ns;
@@ -322,7 +322,7 @@ extern struct workqueue_struct *buffered_io_wq;
 
 static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
 {
-	req->rsp->result.u32 = cpu_to_le32(result);
+	req->cqe->result.u32 = cpu_to_le32(result);
 }
 
 /*

@@ -160,7 +160,7 @@ static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
 {
 	return !nvme_is_write(rsp->req.cmd) &&
 		rsp->req.transfer_len &&
-		!rsp->req.rsp->status &&
+		!rsp->req.cqe->status &&
 		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
 }
 
@@ -364,16 +364,17 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
 		struct nvmet_rdma_rsp *r)
 {
 	/* NVMe CQE / RDMA SEND */
-	r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL);
-	if (!r->req.rsp)
+	r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL);
+	if (!r->req.cqe)
 		goto out;
 
-	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp,
-			sizeof(*r->req.rsp), DMA_TO_DEVICE);
+	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe,
+			sizeof(*r->req.cqe), DMA_TO_DEVICE);
 	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
 		goto out_free_rsp;
 
-	r->send_sge.length = sizeof(*r->req.rsp);
+	r->req.p2p_client = &ndev->device->dev;
+	r->send_sge.length = sizeof(*r->req.cqe);
 	r->send_sge.lkey = ndev->pd->local_dma_lkey;
 
 	r->send_cqe.done = nvmet_rdma_send_done;
@@ -388,7 +389,7 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
 	return 0;
 
 out_free_rsp:
-	kfree(r->req.rsp);
+	kfree(r->req.cqe);
 out:
 	return -ENOMEM;
 }
@@ -397,8 +398,8 @@ static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
 		struct nvmet_rdma_rsp *r)
 {
 	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
-				sizeof(*r->req.rsp), DMA_TO_DEVICE);
-	kfree(r->req.rsp);
+				sizeof(*r->req.cqe), DMA_TO_DEVICE);
+	kfree(r->req.cqe);
 }
 
 static int
@@ -763,8 +764,6 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
 			cmd->send_sge.addr, cmd->send_sge.length,
 			DMA_TO_DEVICE);
 
-	cmd->req.p2p_client = &queue->dev->device->dev;
-
 	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
 			&queue->nvme_sq, &nvmet_rdma_ops))
 		return;
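
The last hunk above moves the req.p2p_client assignment out of the per-command path: the value depends only on the RDMA device, so it can be set once when the response is allocated instead of on every I/O. A small userspace sketch of hoisting that kind of invariant initialization, with made-up names:

/* Invariant state is initialized at allocation time, not per command. */
#include <stdio.h>

struct device { const char *name; };
struct rsp { const struct device *p2p_client; };

static void alloc_time_init(struct rsp *r, const struct device *dev)
{
	r->p2p_client = dev;	/* previously re-assigned on every command */
}

static void handle_command(const struct rsp *r)
{
	/* fast path: no per-I/O re-initialization needed */
	printf("handling command for %s\n", r->p2p_client->name);
}

int main(void)
{
	struct device ib_dev = { .name = "ibdev0" };
	struct rsp r;
	int i;

	alloc_time_init(&r, &ib_dev);
	for (i = 0; i < 3; i++)
		handle_command(&r);
	return 0;
}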

@@ -161,14 +161,14 @@ static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
 
 static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
 {
-	return nvmet_tcp_has_data_in(cmd) && !cmd->req.rsp->status;
+	return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
 }
 
 static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
 {
 	return !nvme_is_write(cmd->req.cmd) &&
 		cmd->req.transfer_len > 0 &&
-		!cmd->req.rsp->status;
+		!cmd->req.cqe->status;
 }
 
 static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
@@ -378,7 +378,7 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
 	pdu->hdr.plen =
 		cpu_to_le32(pdu->hdr.hlen + hdgst +
 		cmd->req.transfer_len + ddgst);
-	pdu->command_id = cmd->req.rsp->command_id;
+	pdu->command_id = cmd->req.cqe->command_id;
 	pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
 	pdu->data_offset = cpu_to_le32(cmd->wbytes_done);
 
@@ -774,12 +774,6 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
 		return -EPROTO;
 	}
 
-	if (icreq->maxr2t != 0) {
-		pr_err("queue %d: unsupported maxr2t %d\n", queue->idx,
-			le32_to_cpu(icreq->maxr2t) + 1);
-		return -EPROTO;
-	}
-
 	queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
 	queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
 	if (queue->hdr_digest || queue->data_digest) {
@@ -1224,7 +1218,7 @@ static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
 			sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
 	if (!c->rsp_pdu)
 		goto out_free_cmd;
-	c->req.rsp = &c->rsp_pdu->cqe;
+	c->req.cqe = &c->rsp_pdu->cqe;
 
 	c->data_pdu = page_frag_alloc(&queue->pf_cache,
 			sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);

@@ -77,7 +77,7 @@ struct nvme_rdma_cm_rep {
  * struct nvme_rdma_cm_rej - rdma connect reject
  *
  * @recfmt: format of the RDMA Private Data
- * @fsts: error status for the associated connect request
+ * @sts: error status for the associated connect request
  */
 struct nvme_rdma_cm_rej {
 	__le16		recfmt;