Merge branch 'nvme-4.20' of git://git.infradead.org/nvme into for-4.20/block
Pull NVMe updates from Christoph:

"The second batch of updates for Linux 4.20:
 - lot of fixes for issues found by static type checkers from Bart
 - two small fixes from Keith
 - fabrics cleanups in preparation of the TCP transport from Sagi
 - more cleanups from Chaitanya"

* 'nvme-4.20' of git://git.infradead.org/nvme:
  nvme-fabrics: move controller options matching to fabrics
  nvme-rdma: always have a valid trsvcid
  nvme-pci: remove duplicate check
  nvme-pci: fix hot removal during error handling
  nvmet-fcloop: suppress a compiler warning
  nvme-core: make implicit seed truncation explicit
  nvmet-fc: fix kernel-doc headers
  nvme-fc: rework the request initialization code
  nvme-fc: introduce struct nvme_fcp_op_w_sgl
  nvme-fc: fix kernel-doc headers
  nvmet: avoid integer overflow in the discard code
  nvmet-rdma: declare local symbols static
  nvmet: use strlcpy() instead of strcpy()
  nvme-pci: fix nvme_suspend_queue() kernel-doc header
  nvme-core: rework a NQN copying operation
  nvme-core: declare local symbols static
  nvmet-rdma: check for timeout in nvme_rdma_wait_for_cm()
  nvmet: use strcmp() instead of strncmp() for subsystem lookup
  nvmet: remove unreachable code
  nvme: update node paths after adding new path
commit bbc152825a
drivers/nvme/host/core.c

@@ -1132,7 +1132,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 
 	return nvme_submit_user_cmd(ns->queue, &c,
 			(void __user *)(uintptr_t)io.addr, length,
-			metadata, meta_len, io.slba, NULL, 0);
+			metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
 }
 
 static u32 nvme_known_admin_effects(u8 opcode)
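The lower_32_bits() change makes the narrowing of io.slba to the 32-bit integrity seed visible at the call site instead of relying on implicit conversion. A minimal userspace sketch of the same idea; the macro mirrors the kernel's lower_32_bits() from include/linux/kernel.h, and the sample value is made up:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's lower_32_bits() from include/linux/kernel.h. */
#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffff))

int main(void)
{
	uint64_t slba = 0x100000001ULL;	/* bit 32 set: lost in a u32 */
	uint32_t implicit = slba;	/* silent truncation to 1 */
	uint32_t explicit_seed = lower_32_bits(slba); /* same value, stated intent */

	printf("implicit=%" PRIu32 " explicit=%" PRIu32 "\n",
	       implicit, explicit_seed);
	return 0;
}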
@@ -2076,7 +2076,7 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
 
 	nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
 	if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
-		strncpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
+		strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
 		return;
 	}
 
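strncpy() does not NUL-terminate the destination when the source fills the buffer, while strlcpy() always does; the swap above removes any reliance on the surrounding length check for termination. A small userspace demonstration; glibc has no strlcpy(), so the sketch supplies one with the kernel/BSD semantics:

#include <stdio.h>
#include <string.h>

/* Minimal strlcpy() with kernel/BSD semantics: copy at most size - 1
 * bytes and always NUL-terminate (provided here because glibc lacks it). */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char a[4], b[4];

	strncpy(a, "abcd", sizeof(a));	  /* fills all 4 bytes, no NUL */
	my_strlcpy(b, "abcd", sizeof(b)); /* copies "abc" plus the NUL */
	printf("b=\"%s\" (a is not safely printable)\n", b);
	return 0;
}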
@@ -2729,7 +2729,7 @@ static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
 	return a->mode;
 }
 
-const struct attribute_group nvme_ns_id_attr_group = {
+static const struct attribute_group nvme_ns_id_attr_group = {
 	.attrs		= nvme_ns_id_attrs,
 	.is_visible	= nvme_ns_id_attrs_are_visible,
 };
drivers/nvme/host/fabrics.c

@@ -868,6 +868,36 @@ static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
 	return 0;
 }
 
+bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
+		struct nvmf_ctrl_options *opts)
+{
+	if (!nvmf_ctlr_matches_baseopts(ctrl, opts) ||
+	    strcmp(opts->traddr, ctrl->opts->traddr) ||
+	    strcmp(opts->trsvcid, ctrl->opts->trsvcid))
+		return false;
+
+	/*
+	 * Checking the local address is rough. In most cases, none is specified
+	 * and the host port is selected by the stack.
+	 *
+	 * Assume no match if:
+	 * -  local address is specified and address is not the same
+	 * -  local address is not specified but remote is, or vice versa
+	 *    (admin using specific host_traddr when it matters).
+	 */
+	if ((opts->mask & NVMF_OPT_HOST_TRADDR) &&
+	    (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
+		if (strcmp(opts->host_traddr, ctrl->opts->host_traddr))
+			return false;
+	} else if ((opts->mask & NVMF_OPT_HOST_TRADDR) ||
+		   (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
+		return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(nvmf_ip_options_match);
+
 static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
 		unsigned int allowed_opts)
 {
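The host_traddr rule in nvmf_ip_options_match() is two-sided: the option matches only when both controllers specify the same address, or both leave it unset. A hypothetical userspace sketch of just that rule; struct opts and host_traddr_matches() are stand-ins invented for the demo, not kernel types:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Stand-in for the NVMF_OPT_HOST_TRADDR mask bit plus the option value. */
struct opts {
	bool has_host_traddr;
	const char *host_traddr;
};

static bool host_traddr_matches(const struct opts *a, const struct opts *b)
{
	if (a->has_host_traddr && b->has_host_traddr)
		return strcmp(a->host_traddr, b->host_traddr) == 0;
	/* one side specified and the other not: treat as no match */
	return !a->has_host_traddr && !b->has_host_traddr;
}

int main(void)
{
	struct opts none = { false, NULL };
	struct opts ip1 = { true, "192.168.1.1" };
	struct opts ip2 = { true, "192.168.1.2" };

	assert(host_traddr_matches(&none, &none)); /* neither set: match */
	assert(host_traddr_matches(&ip1, &ip1));   /* same address: match */
	assert(!host_traddr_matches(&ip1, &ip2));  /* different: no match */
	assert(!host_traddr_matches(&ip1, &none)); /* one-sided: no match */
	return 0;
}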
drivers/nvme/host/fabrics.h

@@ -166,6 +166,8 @@ blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
 		struct request *rq);
 bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
 		bool queue_live);
+bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
+		struct nvmf_ctrl_options *opts);
 
 static inline bool nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
 		bool queue_live)
drivers/nvme/host/fc.c

@@ -20,6 +20,7 @@
 #include <uapi/scsi/fc/fc_fs.h>
 #include <uapi/scsi/fc/fc_els.h>
 #include <linux/delay.h>
+#include <linux/overflow.h>
 
 #include "nvme.h"
 #include "fabrics.h"
@@ -104,6 +105,12 @@ struct nvme_fc_fcp_op {
 	struct nvme_fc_ersp_iu	rsp_iu;
 };
 
+struct nvme_fcp_op_w_sgl {
+	struct nvme_fc_fcp_op	op;
+	struct scatterlist	sgl[SG_CHUNK_SIZE];
+	uint8_t			priv[0];
+};
+
 struct nvme_fc_lport {
 	struct nvme_fc_local_port	localport;
 
@@ -317,7 +324,7 @@ out_done:
  * @template:  LLDD entrypoints and operational parameters for the port
  * @dev:       physical hardware device node port corresponds to. Will be
  *             used for DMA mappings
- * @lport_p:   pointer to a local port pointer. Upon success, the routine
+ * @portptr:   pointer to a local port pointer. Upon success, the routine
  *             will allocate a nvme_fc_local_port structure and place its
  *             address in the local port pointer. Upon failure, local port
  *             pointer will be set to 0.
@@ -425,8 +432,7 @@ EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
  * nvme_fc_unregister_localport - transport entry point called by an
  *                              LLDD to deregister/remove a previously
  *                              registered a NVME host FC port.
- * @localport: pointer to the (registered) local port that is to be
- *             deregistered.
+ * @portptr: pointer to the (registered) local port that is to be deregistered.
  *
  * Returns:
  * a completion status. Must be 0 upon success; a negative errno
@@ -632,7 +638,7 @@ __nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
- * @rport_p:   pointer to a remote port pointer. Upon success, the routine
+ * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to 0.
@@ -809,8 +815,8 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered a NVME subsystem FC port.
- * @remoteport: pointer to the (registered) remote port that is to be
- *              deregistered.
+ * @portptr: pointer to the (registered) remote port that is to be
+ *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
|
@ -1687,6 +1693,8 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
|
|||
struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
|
||||
struct request *rq, u32 rqno)
|
||||
{
|
||||
struct nvme_fcp_op_w_sgl *op_w_sgl =
|
||||
container_of(op, typeof(*op_w_sgl), op);
|
||||
struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
|
||||
int ret = 0;
|
||||
|
||||
|
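container_of() is how __nvme_fc_init_request() recovers the wrapping nvme_fcp_op_w_sgl from the embedded nvme_fc_fcp_op pointer it is handed. A self-contained userspace version of the pattern; struct outer and struct inner are invented for the demo, and the kernel macro adds type checking on top of the same offsetof() arithmetic:

#include <stddef.h>
#include <stdio.h>

/* Userspace container_of(): map a member pointer back to its wrapper. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner {
	int x;
};

struct outer {
	int tag;
	struct inner in;
};

int main(void)
{
	struct outer o = { .tag = 7, .in = { .x = 1 } };
	struct inner *ip = &o.in;
	struct outer *op = container_of(ip, struct outer, in);

	printf("tag=%d\n", op->tag);	/* 7: recovered the enclosing struct */
	return 0;
}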
@@ -1696,7 +1704,6 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
 	op->fcp_req.rspaddr = &op->rsp_iu;
 	op->fcp_req.rsplen = sizeof(op->rsp_iu);
 	op->fcp_req.done = nvme_fc_fcpio_done;
-	op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
 	op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
 	op->ctrl = ctrl;
 	op->queue = queue;
@@ -1735,12 +1742,17 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
 		unsigned int hctx_idx, unsigned int numa_node)
 {
 	struct nvme_fc_ctrl *ctrl = set->driver_data;
-	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+	struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
 	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
 	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
+	int res;
 
 	nvme_req(rq)->ctrl = &ctrl->ctrl;
-	return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
+	res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
+	if (res)
+		return res;
+	op->op.fcp_req.first_sgl = &op->sgl[0];
+	return res;
 }
 
 static int
@@ -1770,7 +1782,6 @@ nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
 	}
 
 	aen_op->flags = FCOP_FLAGS_AEN;
-	aen_op->fcp_req.first_sgl = NULL; /* no sg list */
 	aen_op->fcp_req.private = private;
 
 	memset(sqe, 0, sizeof(*sqe));
@@ -2424,10 +2435,9 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
 	ctrl->tag_set.numa_node = NUMA_NO_NODE;
 	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
-	ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
-					(SG_CHUNK_SIZE *
-						sizeof(struct scatterlist)) +
-					ctrl->lport->ops->fcprqst_priv_sz;
+	ctrl->tag_set.cmd_size =
+		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+			    ctrl->lport->ops->fcprqst_priv_sz);
 	ctrl->tag_set.driver_data = ctrl;
 	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
 	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
@@ -3029,10 +3039,9 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
 	ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
 	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
-	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
-					(SG_CHUNK_SIZE *
-						sizeof(struct scatterlist)) +
-					ctrl->lport->ops->fcprqst_priv_sz;
+	ctrl->admin_tag_set.cmd_size =
+		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+			    ctrl->lport->ops->fcprqst_priv_sz);
 	ctrl->admin_tag_set.driver_data = ctrl;
 	ctrl->admin_tag_set.nr_hw_queues = 1;
 	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
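struct_size(p, member, n), from the <linux/overflow.h> header added at the top of this file, computes the size of a structure with n trailing flexible-array elements, saturating rather than wrapping if the arithmetic overflows. A simplified userspace model of what the two cmd_size assignments now compute; struct op_w_sgl and its member sizes are invented for the demo, and the overflow saturation is omitted:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified model of struct_size(): header plus n trailing elements.
 * (The real kernel macro also saturates to SIZE_MAX on overflow.) */
#define struct_size_demo(p, member, n) \
	(sizeof(*(p)) + (n) * sizeof(*(p)->member))

struct op_w_sgl {
	long op[8];	/* stand-in for the embedded nvme_fc_fcp_op */
	char sgl[128];	/* stand-in for the scatterlist array */
	uint8_t priv[];	/* LLDD private area, sized per transport */
};

int main(void)
{
	/* sizeof() is evaluated at compile time, so the NULL pointer is
	 * never dereferenced - the same trick as in the diff above. */
	size_t sz = struct_size_demo((struct op_w_sgl *)NULL, priv, 64);

	printf("cmd_size=%zu bytes\n", sz);
	return 0;
}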
drivers/nvme/host/multipath.c

@@ -321,6 +321,15 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
 		device_add_disk(&head->subsys->dev, head->disk,
 				nvme_ns_id_attr_groups);
 
+	if (nvme_path_is_optimized(ns)) {
+		int node, srcu_idx;
+
+		srcu_idx = srcu_read_lock(&head->srcu);
+		for_each_node(node)
+			__nvme_find_path(head, node);
+		srcu_read_unlock(&head->srcu, srcu_idx);
+	}
+
 	kblockd_schedule_work(&ns->head->requeue_work);
 }
 
drivers/nvme/host/pci.c

@@ -772,10 +772,10 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 
 		if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
 			goto out_unmap;
+
+		cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
 	}
 
-	if (blk_integrity_rq(req))
-		cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
 	return BLK_STS_OK;
 
 out_unmap:
@@ -1249,7 +1249,7 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
 
 /**
  * nvme_suspend_queue - put queue into suspended state
- * @nvmeq - queue to suspend
+ * @nvmeq: queue to suspend
  */
 static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 {
@@ -2564,13 +2564,12 @@ static void nvme_remove(struct pci_dev *pdev)
 	struct nvme_dev *dev = pci_get_drvdata(pdev);
 
 	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
-
-	cancel_work_sync(&dev->ctrl.reset_work);
 	pci_set_drvdata(pdev, NULL);
 
 	if (!pci_device_is_present(pdev)) {
 		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
 		nvme_dev_disable(dev, true);
+		nvme_dev_remove_admin(dev);
 	}
 
 	flush_work(&dev->ctrl.reset_work);
drivers/nvme/host/rdma.c

@@ -233,8 +233,15 @@ static void nvme_rdma_qp_event(struct ib_event *event, void *context)
 
 static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
 {
-	wait_for_completion_interruptible_timeout(&queue->cm_done,
+	int ret;
+
+	ret = wait_for_completion_interruptible_timeout(&queue->cm_done,
 			msecs_to_jiffies(NVME_RDMA_CONNECT_TIMEOUT_MS) + 1);
+	if (ret < 0)
+		return ret;
+	if (ret == 0)
+		return -ETIMEDOUT;
 	WARN_ON_ONCE(queue->cm_error > 0);
 	return queue->cm_error;
 }
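wait_for_completion_interruptible_timeout() returns a negative errno if the wait was interrupted, 0 if the timeout elapsed, and a positive number of remaining jiffies on completion; the rewritten nvme_rdma_wait_for_cm() now distinguishes all three instead of discarding the result. A userspace stub sketching that three-way contract; fake_wait() and the sample outcomes are invented for the demo:

#include <errno.h>
#include <stdio.h>

/* Stub standing in for wait_for_completion_interruptible_timeout():
 * <0 interrupted, 0 timed out, >0 jiffies remaining on completion. */
static long fake_wait(long outcome)
{
	return outcome;
}

static int wait_for_cm(long outcome)
{
	long ret = fake_wait(outcome);

	if (ret < 0)
		return (int)ret;   /* interrupted: propagate the error */
	if (ret == 0)
		return -ETIMEDOUT; /* timer expired before completion */
	return 0;                  /* completed in time */
}

int main(void)
{
	/* -512 mimics the kernel's -ERESTARTSYS */
	printf("%d %d %d\n", wait_for_cm(-512), wait_for_cm(0), wait_for_cm(5));
	return 0;
}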
@@ -1849,54 +1856,6 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
 	.stop_ctrl		= nvme_rdma_stop_ctrl,
 };
 
-static inline bool
-__nvme_rdma_options_match(struct nvme_rdma_ctrl *ctrl,
-	struct nvmf_ctrl_options *opts)
-{
-	char *stdport = __stringify(NVME_RDMA_IP_PORT);
-
-
-	if (!nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts) ||
-	    strcmp(opts->traddr, ctrl->ctrl.opts->traddr))
-		return false;
-
-	if (opts->mask & NVMF_OPT_TRSVCID &&
-	    ctrl->ctrl.opts->mask & NVMF_OPT_TRSVCID) {
-		if (strcmp(opts->trsvcid, ctrl->ctrl.opts->trsvcid))
-			return false;
-	} else if (opts->mask & NVMF_OPT_TRSVCID) {
-		if (strcmp(opts->trsvcid, stdport))
-			return false;
-	} else if (ctrl->ctrl.opts->mask & NVMF_OPT_TRSVCID) {
-		if (strcmp(stdport, ctrl->ctrl.opts->trsvcid))
-			return false;
-	}
-	/* else, it's a match as both have stdport. Fall to next checks */
-
-	/*
-	 * checking the local address is rough. In most cases, one
-	 * is not specified and the host port is selected by the stack.
-	 *
-	 * Assume no match if:
-	 *   local address is specified and address is not the same
-	 *   local address is not specified but remote is, or vice versa
-	 *     (admin using specific host_traddr when it matters).
-	 */
-	if (opts->mask & NVMF_OPT_HOST_TRADDR &&
-	    ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) {
-		if (strcmp(opts->host_traddr, ctrl->ctrl.opts->host_traddr))
-			return false;
-	} else if (opts->mask & NVMF_OPT_HOST_TRADDR ||
-		   ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
-		return false;
-	/*
-	 * if neither controller had an host port specified, assume it's
-	 * a match as everything else matched.
-	 */
-
-	return true;
-}
-
 /*
  * Fails a connection request if it matches an existing controller
  * (association) with the same tuple:
@@ -1917,7 +1876,7 @@ nvme_rdma_existing_controller(struct nvmf_ctrl_options *opts)
 
 	mutex_lock(&nvme_rdma_ctrl_mutex);
 	list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
-		found = __nvme_rdma_options_match(ctrl, opts);
+		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
 		if (found)
 			break;
 	}
@@ -1932,7 +1891,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 	struct nvme_rdma_ctrl *ctrl;
 	int ret;
 	bool changed;
-	char *port;
 
 	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
 	if (!ctrl)
@@ -1940,15 +1898,21 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 	ctrl->ctrl.opts = opts;
 	INIT_LIST_HEAD(&ctrl->list);
 
-	if (opts->mask & NVMF_OPT_TRSVCID)
-		port = opts->trsvcid;
-	else
-		port = __stringify(NVME_RDMA_IP_PORT);
+	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
+		opts->trsvcid =
+			kstrdup(__stringify(NVME_RDMA_IP_PORT), GFP_KERNEL);
+		if (!opts->trsvcid) {
+			ret = -ENOMEM;
+			goto out_free_ctrl;
+		}
+		opts->mask |= NVMF_OPT_TRSVCID;
+	}
 
 	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
-			opts->traddr, port, &ctrl->addr);
+			opts->traddr, opts->trsvcid, &ctrl->addr);
 	if (ret) {
-		pr_err("malformed address passed: %s:%s\n", opts->traddr, port);
+		pr_err("malformed address passed: %s:%s\n",
+		       opts->traddr, opts->trsvcid);
 		goto out_free_ctrl;
 	}
drivers/nvme/target/admin-cmd.c

@@ -353,7 +353,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 	if (req->port->inline_data_size)
 		id->sgls |= cpu_to_le32(1 << 20);
 
-	strcpy(id->subnqn, ctrl->subsys->subsysnqn);
+	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
 
 	/* Max command capsule size is sqe + single page of in-capsule data */
 	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
drivers/nvme/target/core.c

@@ -1105,8 +1105,7 @@ static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
 	if (!port)
 		return NULL;
 
-	if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
-			NVMF_NQN_SIZE)) {
+	if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
 		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
 			return NULL;
 		return nvmet_disc_subsys;
drivers/nvme/target/discovery.c

@@ -174,7 +174,7 @@ static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
 	if (req->port->inline_data_size)
 		id->sgls |= cpu_to_le32(1 << 20);
 
-	strcpy(id->subnqn, ctrl->subsys->subsysnqn);
+	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
 
 	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
@@ -219,12 +219,10 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
 			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
 		}
 	default:
-		pr_err("unsupported cmd %d\n", cmd->common.opcode);
+		pr_err("unhandled cmd %d\n", cmd->common.opcode);
 		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
 	}
 
-	pr_err("unhandled cmd %d\n", cmd->common.opcode);
-	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
 }
 
 int __init nvmet_init_discovery(void)
drivers/nvme/target/fc.c

@@ -1245,8 +1245,8 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
 * nvme_fc_unregister_targetport - transport entry point called by an
 *                         LLDD to deregister/remove a previously
 *                         registered a local NVME subsystem FC port.
- * @tgtport: pointer to the (registered) target port that is to be
- *           deregistered.
+ * @target_port: pointer to the (registered) target port that is to be
+ *               deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
@@ -1749,7 +1749,7 @@ nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
- * @tgtport:    pointer to the (registered) target port the LS was
+ * @target_port: pointer to the (registered) target port the LS was
 *              received on.
 * @lsreq:      pointer to a lsreq request structure to be used to reference
 *              the exchange corresponding to the LS.
drivers/nvme/target/fcloop.c

@@ -648,6 +648,7 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
 			break;
 
 		/* Fall-Thru to RSP handling */
+		/* FALLTHRU */
 
 	case NVMET_FCOP_RSP:
 		if (fcpreq) {
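The extra /* FALLTHRU */ comment is for static analysis: with gcc's -Wimplicit-fallthrough, a case that drops into the next one triggers a warning unless a recognized fall-through marker (a comment like this, or a fallthrough attribute) declares it intentional. A compilable sketch; classify() and its opcodes are invented for the demo:

#include <stdio.h>

/* Build with: gcc -Wextra fallthru.c
 * Without the marker comment, gcc warns about the implicit fall-through. */
static void classify(int op)
{
	switch (op) {
	case 1:
		printf("READDATA\n");
		/* FALLTHRU */
	case 2:
		printf("RSP\n");
		break;
	default:
		printf("other\n");
	}
}

int main(void)
{
	classify(1);	/* prints READDATA then RSP */
	return 0;
}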
drivers/nvme/target/io-cmd-file.c

@@ -246,7 +246,8 @@ static void nvmet_file_execute_discard(struct nvmet_req *req)
 			break;
 
 		offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
-		len = le32_to_cpu(range.nlb) << req->ns->blksize_shift;
+		len = le32_to_cpu(range.nlb);
+		len <<= req->ns->blksize_shift;
 		if (offset + len > req->ns->size) {
 			ret = NVME_SC_LBA_RANGE | NVME_SC_DNR;
 			break;
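The discard fix splits the assignment because le32_to_cpu(range.nlb) is a 32-bit value, so the shift in the one-line form is performed in 32-bit arithmetic and overflows before the result is widened to the 64-bit len; assigning first makes the shift happen at full width. A userspace reproduction with made-up values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t nlb = 0x00800000;	/* 2^23 blocks */
	unsigned int shift = 12;	/* 4 KiB block size */

	/* Shifted in 32 bits, then widened: the high bits are already gone. */
	uint64_t bad = nlb << shift;
	/* Widen first, then shift: what the two-step form achieves. */
	uint64_t good = (uint64_t)nlb << shift;

	printf("bad=%" PRIu64 " good=%" PRIu64 "\n", bad, good);
	return 0;
}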
drivers/nvme/target/rdma.c

@@ -122,7 +122,7 @@ struct nvmet_rdma_device {
 	int			inline_page_count;
 };
 
-struct workqueue_struct *nvmet_rdma_delete_wq;
+static struct workqueue_struct *nvmet_rdma_delete_wq;
 static bool nvmet_rdma_use_srq;
 module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
 MODULE_PARM_DESC(use_srq, "Use shared receive queue.");