// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

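/*
 * fcloop is a loopback "LLDD": it wires the NVMe-FC host transport
 * directly to the nvmet-FC target transport in software, so the
 * FC-NVME code paths can be exercised without fibre channel hardware.
 */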
enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};

struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};

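/*
 * WWN options must be given as a "0x" prefix followed by 16 hex digits
 * (NVME_FC_TRADDR_HEXNAMELEN is 16), e.g.
 * "wwnn=0x10000090fa945aaa,wwpn=0x10000090fa945bbb".
 * The example WWN values above are illustrative only.
 */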
static int fcloop_verify_addr(substring_t *s)
{
	size_t blen = s->to - s->from + 1;

	if (strnlen(s->from, blen) != NVME_FC_TRADDR_HEXNAMELEN + 2 ||
	    strncmp(s->from, "0x", 2))
		return -EINVAL;

	return 0;
}

static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}

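/*
 * Parse only the wwnn= and wwpn= options from @buf; both must be
 * present, otherwise -EINVAL is returned.
 */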
static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}

#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

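/*
 * Port topology: an fcloop_nport pairs one remote port (rport, the
 * host-side view) with one target port (tport), both hung off a local
 * port (lport). The rport and tport privates reference each other's
 * transport ports so a request issued by one transport can be handed
 * straight to the other.
 */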
struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};

struct fcloop_rport {
	struct nvme_fc_remote_port	*remoteport;
	struct nvmet_fc_target_port	*targetport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct work_struct		ls_work;
};

struct fcloop_tport {
	struct nvmet_fc_target_port	*targetport;
	struct nvme_fc_remote_port	*remoteport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct work_struct		ls_work;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};

struct fcloop_lsreq {
	struct nvmefc_ls_req		*lsreq;
	struct nvmefc_ls_rsp		ls_rsp;
	int				lsdir;	/* H2T or T2H */
	int				status;
	struct list_head		ls_list; /* fcloop_rport->ls_list */
};

struct fcloop_rscn {
	struct fcloop_tport		*tport;
	struct work_struct		work;
};

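/* Initiator-side state of an FCP io, tracked in fcloop_fcpreq->inistate. */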
enum {
	INI_IO_START		= 0,
	INI_IO_ACTIVE		= 1,
	INI_IO_ABORTED		= 2,
	INI_IO_COMPLETED	= 3,
};

struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	u32				inistate;
	bool				active;
	bool				aborted;
	struct kref			ref;
	struct work_struct		fcp_rcv_work;
	struct work_struct		abort_rcv_work;
	struct work_struct		tio_done_work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req	*fcpreq;
	struct fcloop_fcpreq	*tfcp_req;
	spinlock_t		inilock;
};

static inline struct fcloop_lsreq *
ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp)
{
	return container_of(lsrsp, struct fcloop_lsreq, ls_rsp);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}

static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}

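/*
 * Completed LS requests are queued on the rport/tport ls_list and have
 * their done() callbacks invoked from work context. The lock is dropped
 * across each done() call since the callee may free the request.
 */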
static void
fcloop_rport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_rport *rport =
		container_of(work, struct fcloop_rport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&rport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&rport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&rport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */

		spin_lock(&rport->lock);
	}
	spin_unlock(&rport->lock);
}

static int
fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		schedule_work(&rport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, rport,
				  &tls_req->ls_rsp,
				  lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_tport *tport = targetport->private;
	struct nvme_fc_remote_port *remoteport = tport->remoteport;
	struct fcloop_rport *rport;

	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));

	lsrsp->done(lsrsp);

	if (remoteport) {
		rport = remoteport->private;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		schedule_work(&rport->ls_work);
	}

	return 0;
}

static void
fcloop_tport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_tport *tport =
		container_of(work, struct fcloop_tport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&tport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&tport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&tport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */

		spin_lock(&tport->lock);
	}
	spin_unlock(&tport->lock);
}

static int
fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_tport *tport = targetport->private;
	int ret = 0;

	/*
	 * hosthandle should be the dst.rport value.
	 * hosthandle ignored as fcloop currently is
	 * 1:1 tgtport vs remoteport
	 */
	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!tport->remoteport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&tport->lock);
		list_add_tail(&tls_req->ls_list, &tport->ls_list);
		spin_unlock(&tport->lock);
		schedule_work(&tport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_rport *rport = remoteport->private;
	struct nvmet_fc_target_port *targetport = rport->targetport;
	struct fcloop_tport *tport;

	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));
	lsrsp->done(lsrsp);

	if (targetport) {
		tport = targetport->private;
		spin_lock(&tport->lock);
		list_add_tail(&tls_req->ls_list, &tport->ls_list);
		spin_unlock(&tport->lock);
		schedule_work(&tport->ls_work);
	}

	return 0;
}

static void
fcloop_t2h_host_release(void *hosthandle)
{
	/* host handle ignored for now */
}

/*
 * Simulate reception of RSCN and converting it to an initiator transport
 * call to rescan a remote port.
 */
static void
fcloop_tgt_rscn_work(struct work_struct *work)
{
	struct fcloop_rscn *tgt_rscn =
		container_of(work, struct fcloop_rscn, work);
	struct fcloop_tport *tport = tgt_rscn->tport;

	if (tport->remoteport)
		nvme_fc_rescan_remoteport(tport->remoteport);
	kfree(tgt_rscn);
}

static void
fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
{
	struct fcloop_rscn *tgt_rscn;

	tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
	if (!tgt_rscn)
		return;

	tgt_rscn->tport = tgtport->private;
	INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);

	schedule_work(&tgt_rscn->work);
}

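/*
 * A tfcp_req is refcounted: the original io holds one reference and the
 * abort path takes another; the final fcloop_tfcp_req_put() frees it.
 */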
static void
fcloop_tfcp_req_free(struct kref *ref)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(ref, struct fcloop_fcpreq, ref);

	kfree(tfcp_req);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
	return kref_get_unless_zero(&tfcp_req->ref);
}

static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
			struct fcloop_fcpreq *tfcp_req, int status)
{
	struct fcloop_ini_fcpreq *inireq = NULL;

	if (fcpreq) {
		inireq = fcpreq->private;
		spin_lock(&inireq->inilock);
		inireq->tfcp_req = NULL;
		spin_unlock(&inireq->inilock);

		fcpreq->status = status;
		fcpreq->done(fcpreq);
	}

	/* release original io reference on tgt struct */
	fcloop_tfcp_req_put(tfcp_req);
}

static bool drop_fabric_opcode;
#define DROP_OPCODE_MASK	0x00FF
/* fabrics opcode will have a bit set above 1st byte */
static int drop_opcode = -1;
static int drop_instance;
static int drop_amount;
static int drop_current_cnt;

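/*
 * While armed (drop_opcode != -1), occurrences drop_instance through
 * drop_instance + drop_amount of the matching opcode are dropped, after
 * which the trigger disarms itself.
 */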
/*
 * Routine to parse io and determine if the io is to be dropped.
 * Returns:
 *  0 if io is not obstructed
 *  1 if io was dropped
 */
static int check_for_drop(struct fcloop_fcpreq *tfcp_req)
{
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	struct nvme_fc_cmd_iu *cmdiu = fcpreq->cmdaddr;
	struct nvme_command *sqe = &cmdiu->sqe;

	if (drop_opcode == -1)
		return 0;

	pr_info("%s: seq opcd x%02x fctype x%02x: drop F %s op x%02x "
		"inst %d start %d amt %d\n",
		__func__, sqe->common.opcode, sqe->fabrics.fctype,
		drop_fabric_opcode ? "y" : "n",
		drop_opcode, drop_current_cnt, drop_instance, drop_amount);

	if ((drop_fabric_opcode &&
	     (sqe->common.opcode != nvme_fabrics_command ||
	      sqe->fabrics.fctype != drop_opcode)) ||
	    (!drop_fabric_opcode && sqe->common.opcode != drop_opcode))
		return 0;

	if (++drop_current_cnt >= drop_instance) {
		if (drop_current_cnt >= drop_instance + drop_amount)
			drop_opcode = -1;
		return 1;
	}

	return 0;
}

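/*
 * Work-context delivery of a host FCP request to the target transport.
 * An io already marked INI_IO_ABORTED is completed back to the host
 * with -ECANCELED instead of being delivered.
 */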
static void
fcloop_fcp_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	int ret = 0;
	bool aborted = false;

	spin_lock_irq(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
		tfcp_req->inistate = INI_IO_ACTIVE;
		break;
	case INI_IO_ABORTED:
		aborted = true;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(aborted))
		ret = -ECANCELED;
	else {
		if (likely(!check_for_drop(tfcp_req)))
			ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
				&tfcp_req->tgt_fcp_req,
				fcpreq->cmdaddr, fcpreq->cmdlen);
		else
			pr_info("%s: dropped command ********\n", __func__);
	}
	if (ret)
		fcloop_call_host_done(fcpreq, tfcp_req, ret);
}

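/*
 * Work-context handling of a host-initiated abort. If the io already
 * completed, only the extra abort reference is dropped; otherwise the
 * abort is passed to the target transport and the host side sees the
 * io complete with -ECANCELED.
 */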
static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	bool completed = false;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	switch (tfcp_req->inistate) {
	case INI_IO_ABORTED:
		break;
	case INI_IO_COMPLETED:
		completed = true;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(completed)) {
		/* remove reference taken in original abort downcall */
		fcloop_tfcp_req_put(tfcp_req);
		return;
	}

	if (tfcp_req->tport->targetport)
		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
					&tfcp_req->tgt_fcp_req);

	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->fcpreq = NULL;
	spin_unlock_irq(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
	/* call_host_done releases reference for abort downcall */
}

/*
 * FCP IO operation done by target completion.
 * call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, tio_done_work);
	struct nvmefc_fcp_req *fcpreq;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->inistate = INI_IO_COMPLETED;
	spin_unlock_irq(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}

static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	spin_lock_init(&inireq->inilock);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	tfcp_req->inistate = INI_IO_START;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
	kref_init(&tfcp_req->ref);

	schedule_work(&tfcp_req->fcp_rcv_work);

	return 0;
}

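fcloop_fcp_req() only queues the io; the handoff to the target side happens
in the scheduled work item. A plausible, simplified shape of that handler
follows (the real fcloop_fcp_recv_work in this file also handles a racing
abort via inistate, omitted here):

/*
 * Sketch (simplified): deliver the queued initiator command to the
 * nvmet-fc target side via the transport's receive entry point.
 */
static void
fcloop_fcp_recv_work_sketch(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	int ret;

	ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
				&tfcp_req->tgt_fcp_req,
				fcpreq->cmdaddr, fcpreq->cmdlen);
	if (ret)
		/* target refused the io; complete it back to the host */
		fcloop_call_host_done(fcpreq, tfcp_req, ret);
}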
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}

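fcloop_fcp_copy_data() is a two-cursor walk over a pair of scatter-gather
lists: each pass moves min(io chunk, data chunk, remaining length) bytes and
advances whichever cursor ran dry. The same pattern as a self-contained
user-space analogue (plain byte arrays standing in for scatterlists; all
names here are hypothetical):

#include <stdint.h>
#include <string.h>

struct seg { uint8_t *buf; uint32_t len; };

/* two-cursor copy: move min(src chunk, dst chunk, length) per step */
static void
copy_segments(struct seg *dst, struct seg *src, uint32_t length)
{
	uint8_t *dp = dst->buf, *sp = src->buf;
	uint32_t dlen = dst->len, slen = src->len, tlen;

	while (length) {
		tlen = slen < dlen ? slen : dlen;
		if (tlen > length)
			tlen = length;
		memcpy(dp, sp, tlen);
		length -= tlen;

		slen -= tlen;
		if (!slen && length) {
			src++;			/* next source segment */
			sp = src->buf;
			slen = src->len;
		} else
			sp += tlen;

		dlen -= tlen;
		if (!dlen && length) {
			dst++;			/* next destination segment */
			dp = dst->buf;
			dlen = dst->len;
		} else
			dp += tlen;
	}
}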
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock_irq(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock_irq(&tfcp_req->reqlock);

		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */
		fallthrough;

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock_irq(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}

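For the "lldd receives an ABTS" case described in the commit message above,
the transport exports an upcall the driver can use to report the abort. A
minimal sketch of how an LLDD might funnel a matched ABTS into nvmet-fc
(hypothetical driver context; nvmet_fc_rcv_fcp_abort() is the transport's
entry point for this):

/*
 * Sketch: the LLDD matched an incoming ABTS to an outstanding target-side
 * io and reports it. The transport does not clean the io up until the
 * current op's done call (or the nvmet layer completion) occurs.
 */
static void
lldd_handle_abts_sketch(struct nvmet_fc_target_port *targetport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	nvmet_fc_rcv_fcp_abort(targetport, tgt_fcpreq);
}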
static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->aborted = true;
	spin_unlock_irq(&tfcp_req->reqlock);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}

2017-04-12 02:32:29 +08:00
|
|
|
static void
|
|
|
|
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
|
|
|
|
struct nvmefc_tgt_fcp_req *tgt_fcpreq)
|
|
|
|
{
|
|
|
|
struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
|
|
|
|
|
2017-11-30 08:47:32 +08:00
|
|
|
schedule_work(&tfcp_req->tio_done_work);
|
2017-04-12 02:32:29 +08:00
|
|
|
}
|
|
|
|
|
2016-12-02 16:28:44 +08:00
|
|
|
static void
|
2020-04-01 00:50:00 +08:00
|
|
|
fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport,
|
2016-12-02 16:28:44 +08:00
|
|
|
struct nvme_fc_remote_port *remoteport,
|
|
|
|
struct nvmefc_ls_req *lsreq)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2020-04-01 00:50:01 +08:00
|
|
|
static void
|
|
|
|
fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport,
|
|
|
|
void *hosthandle, struct nvmefc_ls_req *lsreq)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2016-12-02 16:28:44 +08:00
|
|
|
static void
|
|
|
|
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
|
|
|
|
struct nvme_fc_remote_port *remoteport,
|
|
|
|
void *hw_queue_handle,
|
|
|
|
struct nvmefc_fcp_req *fcpreq)
|
|
|
|
{
|
2017-04-12 02:32:31 +08:00
|
|
|
struct fcloop_ini_fcpreq *inireq = fcpreq->private;
|
2017-11-30 08:47:33 +08:00
|
|
|
struct fcloop_fcpreq *tfcp_req;
|
|
|
|
bool abortio = true;
|
|
|
|
|
|
|
|
spin_lock(&inireq->inilock);
|
|
|
|
tfcp_req = inireq->tfcp_req;
|
|
|
|
if (tfcp_req)
|
|
|
|
fcloop_tfcp_req_get(tfcp_req);
|
|
|
|
spin_unlock(&inireq->inilock);
|
2017-04-12 02:32:31 +08:00
|
|
|
|
|
|
|
if (!tfcp_req)
|
|
|
|
/* abort has already been called */
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* break initiator/target relationship for io */
|
2019-06-21 04:07:00 +08:00
|
|
|
spin_lock_irq(&tfcp_req->reqlock);
|
2017-11-30 08:47:33 +08:00
|
|
|
switch (tfcp_req->inistate) {
|
|
|
|
case INI_IO_START:
|
|
|
|
case INI_IO_ACTIVE:
|
|
|
|
tfcp_req->inistate = INI_IO_ABORTED;
|
|
|
|
break;
|
|
|
|
case INI_IO_COMPLETED:
|
|
|
|
abortio = false;
|
|
|
|
break;
|
|
|
|
default:
|
2019-06-21 04:07:00 +08:00
|
|
|
spin_unlock_irq(&tfcp_req->reqlock);
|
2017-11-30 08:47:33 +08:00
|
|
|
WARN_ON(1);
|
|
|
|
return;
|
|
|
|
}
|
2019-06-21 04:07:00 +08:00
|
|
|
spin_unlock_irq(&tfcp_req->reqlock);
|
2017-04-12 02:32:31 +08:00
|
|
|
|
2017-11-30 08:47:33 +08:00
|
|
|
if (abortio)
|
|
|
|
/* leave the reference while the work item is scheduled */
|
|
|
|
WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
|
|
|
|
else {
|
|
|
|
/*
|
|
|
|
* as the io has already had the done callback made,
|
|
|
|
* nothing more to do. So release the reference taken above
|
|
|
|
*/
|
|
|
|
fcloop_tfcp_req_put(tfcp_req);
|
|
|
|
}
|
2016-12-02 16:28:44 +08:00
|
|
|
}
|
|
|
|
|
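/*
 * Summary of the abort flow above (an illustrative sketch, not driver
 * code):
 *
 *	INI_IO_START / INI_IO_ACTIVE -> mark INI_IO_ABORTED, keep the
 *		reference and schedule abort_rcv_work
 *	INI_IO_COMPLETED             -> done callback already ran, so only
 *		drop the reference taken under inilock
 */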
2017-09-20 05:01:50 +08:00
|
|
|
static void
|
|
|
|
fcloop_nport_free(struct kref *ref)
|
|
|
|
{
|
|
|
|
struct fcloop_nport *nport =
|
|
|
|
container_of(ref, struct fcloop_nport, ref);
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&fcloop_lock, flags);
|
|
|
|
list_del(&nport->nport_list);
|
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
|
|
|
|
|
|
|
kfree(nport);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
fcloop_nport_put(struct fcloop_nport *nport)
|
|
|
|
{
|
|
|
|
kref_put(&nport->ref, fcloop_nport_free);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
fcloop_nport_get(struct fcloop_nport *nport)
|
|
|
|
{
|
|
|
|
return kref_get_unless_zero(&nport->ref);
|
|
|
|
}
|
|
|
|
|
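/*
 * Illustrative pairing of the refcount helpers above (a sketch;
 * use_nport() is a hypothetical placeholder):
 *
 *	if (fcloop_nport_get(nport)) {
 *		use_nport(nport);
 *		fcloop_nport_put(nport);   (put may free the nport)
 *	}
 */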
2016-12-02 16:28:44 +08:00
|
|
|
static void
|
|
|
|
fcloop_localport_delete(struct nvme_fc_local_port *localport)
|
|
|
|
{
|
2017-11-30 08:47:31 +08:00
|
|
|
struct fcloop_lport_priv *lport_priv = localport->private;
|
|
|
|
struct fcloop_lport *lport = lport_priv->lport;
|
2016-12-02 16:28:44 +08:00
|
|
|
|
|
|
|
/* release any threads waiting for the unreg to complete */
|
|
|
|
complete(&lport->unreg_done);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
|
|
|
|
{
|
|
|
|
struct fcloop_rport *rport = remoteport->private;
|
|
|
|
|
2020-03-19 05:41:12 +08:00
|
|
|
flush_work(&rport->ls_work);
|
2017-09-20 05:01:50 +08:00
|
|
|
fcloop_nport_put(rport->nport);
|
2016-12-02 16:28:44 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
|
|
|
|
{
|
|
|
|
struct fcloop_tport *tport = targetport->private;
|
|
|
|
|
2020-04-01 00:50:01 +08:00
|
|
|
flush_work(&tport->ls_work);
|
2017-09-20 05:01:50 +08:00
|
|
|
fcloop_nport_put(tport->nport);
|
2016-12-02 16:28:44 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
#define FCLOOP_HW_QUEUES 4
|
|
|
|
#define FCLOOP_SGL_SEGS 256
|
|
|
|
#define FCLOOP_DMABOUND_4G 0xFFFFFFFF
|
|
|
|
|
2017-04-21 16:37:25 +08:00
|
|
|
static struct nvme_fc_port_template fctemplate = {
|
2016-12-02 16:28:44 +08:00
|
|
|
.localport_delete = fcloop_localport_delete,
|
|
|
|
.remoteport_delete = fcloop_remoteport_delete,
|
|
|
|
.create_queue = fcloop_create_queue,
|
|
|
|
.delete_queue = fcloop_delete_queue,
|
2020-04-01 00:50:00 +08:00
|
|
|
.ls_req = fcloop_h2t_ls_req,
|
2016-12-02 16:28:44 +08:00
|
|
|
.fcp_io = fcloop_fcp_req,
|
2020-04-01 00:50:00 +08:00
|
|
|
.ls_abort = fcloop_h2t_ls_abort,
|
2016-12-02 16:28:44 +08:00
|
|
|
.fcp_abort = fcloop_fcp_abort,
|
2020-04-01 00:50:01 +08:00
|
|
|
.xmt_ls_rsp = fcloop_t2h_xmt_ls_rsp,
|
2016-12-02 16:28:44 +08:00
|
|
|
.max_hw_queues = FCLOOP_HW_QUEUES,
|
|
|
|
.max_sgl_segments = FCLOOP_SGL_SEGS,
|
|
|
|
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
|
|
|
|
.dma_boundary = FCLOOP_DMABOUND_4G,
|
|
|
|
/* sizes of additional private data for data structures */
|
2017-11-30 08:47:31 +08:00
|
|
|
.local_priv_sz = sizeof(struct fcloop_lport_priv),
|
2016-12-02 16:28:44 +08:00
|
|
|
.remote_priv_sz = sizeof(struct fcloop_rport),
|
|
|
|
.lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
|
2017-04-12 02:32:30 +08:00
|
|
|
.fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),
|
2016-12-02 16:28:44 +08:00
|
|
|
};
|
|
|
|
|
2017-04-21 16:37:25 +08:00
|
|
|
static struct nvmet_fc_target_template tgttemplate = {
|
2016-12-02 16:28:44 +08:00
|
|
|
.targetport_delete = fcloop_targetport_delete,
|
2020-04-01 00:50:00 +08:00
|
|
|
.xmt_ls_rsp = fcloop_h2t_xmt_ls_rsp,
|
2016-12-02 16:28:44 +08:00
|
|
|
.fcp_op = fcloop_fcp_op,
|
2017-04-12 02:32:31 +08:00
|
|
|
.fcp_abort = fcloop_tgt_fcp_abort,
|
2017-04-12 02:32:29 +08:00
|
|
|
.fcp_req_release = fcloop_fcp_req_release,
|
2019-05-15 05:58:04 +08:00
|
|
|
.discovery_event = fcloop_tgt_discovery_evt,
|
2020-04-01 00:50:01 +08:00
|
|
|
.ls_req = fcloop_t2h_ls_req,
|
|
|
|
.ls_abort = fcloop_t2h_ls_abort,
|
|
|
|
.host_release = fcloop_t2h_host_release,
|
2016-12-02 16:28:44 +08:00
|
|
|
.max_hw_queues = FCLOOP_HW_QUEUES,
|
|
|
|
.max_sgl_segments = FCLOOP_SGL_SEGS,
|
|
|
|
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
|
|
|
|
.dma_boundary = FCLOOP_DMABOUND_4G,
|
|
|
|
/* optional features */
|
2017-11-30 08:47:32 +08:00
|
|
|
.target_features = 0,
|
2016-12-02 16:28:44 +08:00
|
|
|
/* sizes of additional private data for data structures */
|
|
|
|
.target_priv_sz = sizeof(struct fcloop_tport),
|
2020-04-01 00:50:01 +08:00
|
|
|
.lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
|
2016-12-02 16:28:44 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
|
|
|
|
const char *buf, size_t count)
|
|
|
|
{
|
|
|
|
struct nvme_fc_port_info pinfo;
|
|
|
|
struct fcloop_ctrl_options *opts;
|
|
|
|
struct nvme_fc_local_port *localport;
|
|
|
|
struct fcloop_lport *lport;
|
2017-11-30 08:47:31 +08:00
|
|
|
struct fcloop_lport_priv *lport_priv;
|
|
|
|
unsigned long flags;
|
|
|
|
int ret = -ENOMEM;
|
|
|
|
|
|
|
|
lport = kzalloc(sizeof(*lport), GFP_KERNEL);
|
|
|
|
if (!lport)
|
|
|
|
return -ENOMEM;
|
2016-12-02 16:28:44 +08:00
|
|
|
|
|
|
|
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
|
|
|
|
if (!opts)
|
2017-11-30 08:47:31 +08:00
|
|
|
goto out_free_lport;
|
2016-12-02 16:28:44 +08:00
|
|
|
|
|
|
|
ret = fcloop_parse_options(opts, buf);
|
|
|
|
if (ret)
|
|
|
|
goto out_free_opts;
|
|
|
|
|
|
|
|
/* everything there? */
|
|
|
|
if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out_free_opts;
|
|
|
|
}
|
|
|
|
|
2017-09-20 05:01:50 +08:00
|
|
|
memset(&pinfo, 0, sizeof(pinfo));
|
2016-12-02 16:28:44 +08:00
|
|
|
pinfo.node_name = opts->wwnn;
|
|
|
|
pinfo.port_name = opts->wwpn;
|
|
|
|
pinfo.port_role = opts->roles;
|
|
|
|
pinfo.port_id = opts->fcaddr;
|
|
|
|
|
|
|
|
ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
|
|
|
|
if (!ret) {
|
|
|
|
/* success */
|
2017-11-30 08:47:31 +08:00
|
|
|
lport_priv = localport->private;
|
|
|
|
lport_priv->lport = lport;
|
|
|
|
|
2016-12-02 16:28:44 +08:00
|
|
|
lport->localport = localport;
|
|
|
|
INIT_LIST_HEAD(&lport->lport_list);
|
|
|
|
|
|
|
|
spin_lock_irqsave(&fcloop_lock, flags);
|
|
|
|
list_add_tail(&lport->lport_list, &fcloop_lports);
|
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
out_free_opts:
|
|
|
|
kfree(opts);
|
2017-11-30 08:47:31 +08:00
|
|
|
out_free_lport:
|
|
|
|
/* free only if we're going to fail */
|
|
|
|
if (ret)
|
|
|
|
kfree(lport);
|
|
|
|
|
2016-12-02 16:28:44 +08:00
|
|
|
return ret ? ret : count;
|
|
|
|
}
|
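/*
 * Illustrative sysfs usage for this handler (a sketch; WWN values are
 * placeholders, and this assumes LPORT_OPTS requires at least wwnn/wwpn):
 *
 *	echo "wwnn=0x<16 hex digits>,wwpn=0x<16 hex digits>" \
 *		> /sys/class/fcloop/ctl/add_local_port
 *
 * The matching del_local_port attribute takes the same wwnn/wwpn pair,
 * parsed by fcloop_parse_nm_options().
 */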
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
__unlink_local_port(struct fcloop_lport *lport)
|
|
|
|
{
|
|
|
|
list_del(&lport->lport_list);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
__wait_localport_unreg(struct fcloop_lport *lport)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
init_completion(&lport->unreg_done);
|
|
|
|
|
|
|
|
ret = nvme_fc_unregister_localport(lport->localport);
|
|
|
|
|
|
|
|
wait_for_completion(&lport->unreg_done);
|
|
|
|
|
2017-11-30 08:47:31 +08:00
|
|
|
kfree(lport);
|
|
|
|
|
2016-12-02 16:28:44 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
|
|
|
|
const char *buf, size_t count)
|
|
|
|
{
|
|
|
|
struct fcloop_lport *tlport, *lport = NULL;
|
|
|
|
u64 nodename, portname;
|
|
|
|
unsigned long flags;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&fcloop_lock, flags);
|
|
|
|
|
|
|
|
list_for_each_entry(tlport, &fcloop_lports, lport_list) {
|
|
|
|
if (tlport->localport->node_name == nodename &&
|
|
|
|
tlport->localport->port_name == portname) {
|
|
|
|
lport = tlport;
|
|
|
|
__unlink_local_port(lport);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
|
|
|
|
|
|
|
if (!lport)
|
|
|
|
return -ENOENT;
|
|
|
|
|
|
|
|
ret = __wait_localport_unreg(lport);
|
|
|
|
|
|
|
|
return ret ? ret : count;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct fcloop_nport *
|
|
|
|
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
|
|
|
|
{
|
|
|
|
struct fcloop_nport *newnport, *nport = NULL;
|
|
|
|
struct fcloop_lport *tmplport, *lport = NULL;
|
|
|
|
struct fcloop_ctrl_options *opts;
|
|
|
|
unsigned long flags;
|
|
|
|
u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
|
|
|
|
if (!opts)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
ret = fcloop_parse_options(opts, buf);
|
|
|
|
if (ret)
|
|
|
|
goto out_free_opts;
|
|
|
|
|
|
|
|
/* everything there? */
|
|
|
|
if ((opts->mask & opts_mask) != opts_mask) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out_free_opts;
|
|
|
|
}
|
|
|
|
|
|
|
|
newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
|
|
|
|
if (!newnport)
|
|
|
|
goto out_free_opts;
|
|
|
|
|
|
|
|
INIT_LIST_HEAD(&newnport->nport_list);
|
|
|
|
newnport->node_name = opts->wwnn;
|
|
|
|
newnport->port_name = opts->wwpn;
|
|
|
|
if (opts->mask & NVMF_OPT_ROLES)
|
|
|
|
newnport->port_role = opts->roles;
|
|
|
|
if (opts->mask & NVMF_OPT_FCADDR)
|
|
|
|
newnport->port_id = opts->fcaddr;
|
|
|
|
kref_init(&newnport->ref);
|
|
|
|
|
|
|
|
spin_lock_irqsave(&fcloop_lock, flags);
|
|
|
|
|
|
|
|
list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
|
|
|
|
if (tmplport->localport->node_name == opts->wwnn &&
|
|
|
|
tmplport->localport->port_name == opts->wwpn)
|
|
|
|
goto out_invalid_opts;
|
|
|
|
|
|
|
|
if (tmplport->localport->node_name == opts->lpwwnn &&
|
|
|
|
tmplport->localport->port_name == opts->lpwwpn)
|
|
|
|
lport = tmplport;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (remoteport) {
|
|
|
|
if (!lport)
|
|
|
|
goto out_invalid_opts;
|
|
|
|
newnport->lport = lport;
|
|
|
|
}
|
|
|
|
|
|
|
|
list_for_each_entry(nport, &fcloop_nports, nport_list) {
|
|
|
|
if (nport->node_name == opts->wwnn &&
|
|
|
|
nport->port_name == opts->wwpn) {
|
|
|
|
if ((remoteport && nport->rport) ||
|
|
|
|
(!remoteport && nport->tport)) {
|
|
|
|
nport = NULL;
|
|
|
|
goto out_invalid_opts;
|
|
|
|
}
|
|
|
|
|
|
|
|
fcloop_nport_get(nport);
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
|
|
|
|
|
|
|
if (remoteport)
|
|
|
|
nport->lport = lport;
|
|
|
|
if (opts->mask & NVMF_OPT_ROLES)
|
|
|
|
nport->port_role = opts->roles;
|
|
|
|
if (opts->mask & NVMF_OPT_FCADDR)
|
|
|
|
nport->port_id = opts->fcaddr;
|
|
|
|
goto out_free_newnport;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
list_add_tail(&newnport->nport_list, &fcloop_nports);
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
|
|
|
|
|
|
|
kfree(opts);
|
|
|
|
return newnport;
|
|
|
|
|
|
|
|
out_invalid_opts:
|
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
|
|
|
out_free_newnport:
|
|
|
|
kfree(newnport);
|
|
|
|
out_free_opts:
|
|
|
|
kfree(opts);
|
|
|
|
return nport;
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
|
|
|
|
const char *buf, size_t count)
|
|
|
|
{
|
|
|
|
struct nvme_fc_remote_port *remoteport;
|
|
|
|
struct fcloop_nport *nport;
|
|
|
|
struct fcloop_rport *rport;
|
|
|
|
struct nvme_fc_port_info pinfo;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
nport = fcloop_alloc_nport(buf, count, true);
|
|
|
|
if (!nport)
|
|
|
|
return -EIO;
|
|
|
|
|
2017-09-20 05:01:50 +08:00
|
|
|
memset(&pinfo, 0, sizeof(pinfo));
|
2016-12-02 16:28:44 +08:00
|
|
|
pinfo.node_name = nport->node_name;
|
|
|
|
pinfo.port_name = nport->port_name;
|
|
|
|
pinfo.port_role = nport->port_role;
|
|
|
|
pinfo.port_id = nport->port_id;
|
|
|
|
|
|
|
|
ret = nvme_fc_register_remoteport(nport->lport->localport,
|
|
|
|
&pinfo, &remoteport);
|
|
|
|
if (ret || !remoteport) {
|
|
|
|
fcloop_nport_put(nport);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* success */
|
|
|
|
rport = remoteport->private;
|
|
|
|
rport->remoteport = remoteport;
|
|
|
|
rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
|
|
|
|
if (nport->tport) {
|
|
|
|
nport->tport->remoteport = remoteport;
|
|
|
|
nport->tport->lport = nport->lport;
|
|
|
|
}
|
|
|
|
rport->nport = nport;
|
|
|
|
rport->lport = nport->lport;
|
|
|
|
nport->rport = rport;
|
2020-03-19 05:41:12 +08:00
|
|
|
spin_lock_init(&rport->lock);
|
|
|
|
INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
|
|
|
|
INIT_LIST_HEAD(&rport->ls_list);
|
2016-12-02 16:28:44 +08:00
|
|
|
|
2016-12-09 22:59:47 +08:00
|
|
|
return count;
|
2016-12-02 16:28:44 +08:00
|
|
|
}
|
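/*
 * Illustrative add_remote_port write (placeholders, not real WWNs);
 * lpwwnn/lpwwpn must name an existing local port so fcloop_alloc_nport()
 * can link the new nport to its lport:
 *
 *	echo "wwnn=<wwnn>,wwpn=<wwpn>,lpwwnn=<local wwnn>,lpwwpn=<local wwpn>" \
 *		> /sys/class/fcloop/ctl/add_remote_port
 */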
|
|
|
|
|
|
|
|
|
|
|
static struct fcloop_rport *
|
|
|
|
__unlink_remote_port(struct fcloop_nport *nport)
|
|
|
|
{
|
|
|
|
struct fcloop_rport *rport = nport->rport;
|
|
|
|
|
|
|
|
if (rport && nport->tport)
|
|
|
|
nport->tport->remoteport = NULL;
|
|
|
|
nport->rport = NULL;
|
|
|
|
|
|
|
|
return rport;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2017-09-20 05:01:50 +08:00
|
|
|
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
|
2016-12-02 16:28:44 +08:00
|
|
|
{
|
|
|
|
if (!rport)
|
|
|
|
return -EALREADY;
|
|
|
|
|
2017-09-20 05:01:50 +08:00
|
|
|
return nvme_fc_unregister_remoteport(rport->remoteport);
|
2016-12-02 16:28:44 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
|
|
|
|
const char *buf, size_t count)
|
|
|
|
{
|
|
|
|
struct fcloop_nport *nport = NULL, *tmpport;
|
|
|
|
struct fcloop_rport *rport = NULL;
|
|
|
|
u64 nodename, portname;
|
|
|
|
unsigned long flags;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&fcloop_lock, flags);
|
|
|
|
|
|
|
|
list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
|
|
|
|
if (tmpport->node_name == nodename &&
|
|
|
|
tmpport->port_name == portname && tmpport->rport) {
|
|
|
|
nport = tmpport;
|
|
|
|
rport = __unlink_remote_port(nport);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
|
|
|
|
|
|
|
if (!nport)
|
|
|
|
return -ENOENT;
|
|
|
|
|
2017-09-20 05:01:50 +08:00
|
|
|
ret = __remoteport_unreg(nport, rport);
|
2016-12-02 16:28:44 +08:00
|
|
|
|
|
|
|
return ret ? ret : count;
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
|
|
|
|
const char *buf, size_t count)
|
|
|
|
{
|
|
|
|
struct nvmet_fc_target_port *targetport;
|
|
|
|
struct fcloop_nport *nport;
|
|
|
|
struct fcloop_tport *tport;
|
|
|
|
struct nvmet_fc_port_info tinfo;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
nport = fcloop_alloc_nport(buf, count, false);
|
|
|
|
if (!nport)
|
|
|
|
return -EIO;
|
|
|
|
|
|
|
|
tinfo.node_name = nport->node_name;
|
|
|
|
tinfo.port_name = nport->port_name;
|
|
|
|
tinfo.port_id = nport->port_id;
|
|
|
|
|
|
|
|
ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
|
|
|
|
&targetport);
|
|
|
|
if (ret) {
|
|
|
|
fcloop_nport_put(nport);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* success */
|
|
|
|
tport = targetport->private;
|
|
|
|
tport->targetport = targetport;
|
|
|
|
tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
|
|
|
|
if (nport->rport)
|
|
|
|
nport->rport->targetport = targetport;
|
|
|
|
tport->nport = nport;
|
|
|
|
tport->lport = nport->lport;
|
|
|
|
nport->tport = tport;
|
2020-04-01 00:50:01 +08:00
|
|
|
spin_lock_init(&tport->lock);
|
|
|
|
INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
|
|
|
|
INIT_LIST_HEAD(&tport->ls_list);
|
2016-12-02 16:28:44 +08:00
|
|
|
|
2016-12-09 22:59:47 +08:00
|
|
|
return count;
|
2016-12-02 16:28:44 +08:00
|
|
|
}
|
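/*
 * Illustrative add_target_port write (placeholders). When wwnn/wwpn match
 * an nport that already has a remote port, the tport and rport halves are
 * cross-linked as done just above:
 *
 *	echo "wwnn=<wwnn>,wwpn=<wwpn>" > /sys/class/fcloop/ctl/add_target_port
 */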
|
|
|
|
|
|
|
|
|
|
|
static struct fcloop_tport *
|
|
|
|
__unlink_target_port(struct fcloop_nport *nport)
|
|
|
|
{
|
|
|
|
struct fcloop_tport *tport = nport->tport;
|
|
|
|
|
|
|
|
if (tport && nport->rport)
|
|
|
|
nport->rport->targetport = NULL;
|
|
|
|
nport->tport = NULL;
|
|
|
|
|
|
|
|
return tport;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2017-09-20 05:01:50 +08:00
|
|
|
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
|
2016-12-02 16:28:44 +08:00
|
|
|
{
|
|
|
|
if (!tport)
|
|
|
|
return -EALREADY;
|
|
|
|
|
2017-09-20 05:01:50 +08:00
|
|
|
return nvmet_fc_unregister_targetport(tport->targetport);
|
2016-12-02 16:28:44 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
|
|
|
|
const char *buf, size_t count)
|
|
|
|
{
|
|
|
|
struct fcloop_nport *nport = NULL, *tmpport;
|
2017-12-22 06:15:47 +08:00
|
|
|
struct fcloop_tport *tport = NULL;
|
2016-12-02 16:28:44 +08:00
|
|
|
u64 nodename, portname;
|
|
|
|
unsigned long flags;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&fcloop_lock, flags);
|
|
|
|
|
|
|
|
list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
|
|
|
|
if (tmpport->node_name == nodename &&
|
|
|
|
tmpport->port_name == portname && tmpport->tport) {
|
|
|
|
nport = tmpport;
|
|
|
|
tport = __unlink_target_port(nport);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
|
|
|
|
|
|
|
if (!nport)
|
|
|
|
return -ENOENT;
|
|
|
|
|
2017-09-20 05:01:50 +08:00
|
|
|
ret = __targetport_unreg(nport, tport);
|
2016-12-02 16:28:44 +08:00
|
|
|
|
|
|
|
return ret ? ret : count;
|
|
|
|
}
|
|
|
|
|
2020-10-17 05:28:38 +08:00
|
|
|
static ssize_t
|
|
|
|
fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
|
|
|
|
const char *buf, size_t count)
|
|
|
|
{
|
2020-12-08 04:29:40 +08:00
|
|
|
unsigned int opcode;
|
|
|
|
int starting, amount;
|
2020-10-17 05:28:38 +08:00
|
|
|
|
|
|
|
if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
|
|
|
|
return -EBADRQC;
|
|
|
|
|
|
|
|
drop_current_cnt = 0;
|
|
|
|
drop_fabric_opcode = (opcode & ~DROP_OPCODE_MASK) ? true : false;
|
|
|
|
drop_opcode = (opcode & DROP_OPCODE_MASK);
|
|
|
|
drop_instance = starting;
|
|
|
|
/* the check to drop routine uses instance + count to know when
|
|
|
|
* to end. Thus, if dropping 1 instance, count should be 0,
|
|
|
|
* so subtract 1 from the count.
|
|
|
|
*/
|
|
|
|
drop_amount = amount - 1;
|
|
|
|
|
|
|
|
pr_info("%s: DROP: Starting at instance %d of%s opcode x%x drop +%d "
|
|
|
|
"instances\n",
|
|
|
|
__func__, drop_instance, drop_fabric_opcode ? " fabric" : "",
|
|
|
|
drop_opcode, drop_amount);
|
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
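/*
 * Illustrative set_cmd_drop write matching the "%x:%d:%d" scan above,
 * i.e. opcode:starting_instance:number_to_drop (example values):
 *
 *	echo "0x2:0:1" > /sys/class/fcloop/ctl/set_cmd_drop
 *
 * drops the first occurrence of opcode 0x2. Setting bits above
 * DROP_OPCODE_MASK in the opcode field selects fabrics commands instead.
 */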
|
|
|
|
2016-12-02 16:28:44 +08:00
|
|
|
|
|
|
|
static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
|
|
|
|
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
|
|
|
|
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
|
|
|
|
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
|
|
|
|
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
|
|
|
|
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
|
2020-10-17 05:28:38 +08:00
|
|
|
static DEVICE_ATTR(set_cmd_drop, 0200, NULL, fcloop_set_cmd_drop);
|
2016-12-02 16:28:44 +08:00
|
|
|
|
|
|
|
static struct attribute *fcloop_dev_attrs[] = {
|
|
|
|
&dev_attr_add_local_port.attr,
|
|
|
|
&dev_attr_del_local_port.attr,
|
|
|
|
&dev_attr_add_remote_port.attr,
|
|
|
|
&dev_attr_del_remote_port.attr,
|
|
|
|
&dev_attr_add_target_port.attr,
|
|
|
|
&dev_attr_del_target_port.attr,
|
2020-10-17 05:28:38 +08:00
|
|
|
&dev_attr_set_cmd_drop.attr,
|
2016-12-02 16:28:44 +08:00
|
|
|
NULL
|
|
|
|
};
|
|
|
|
|
2021-01-09 07:41:47 +08:00
|
|
|
static const struct attribute_group fcloop_dev_attrs_group = {
|
2016-12-02 16:28:44 +08:00
|
|
|
.attrs = fcloop_dev_attrs,
|
|
|
|
};
|
|
|
|
|
|
|
|
static const struct attribute_group *fcloop_dev_attr_groups[] = {
|
|
|
|
&fclopp_dev_attrs_group,
|
|
|
|
NULL,
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct class *fcloop_class;
|
|
|
|
static struct device *fcloop_device;
|
|
|
|
|
|
|
|
|
|
|
|
static int __init fcloop_init(void)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
fcloop_class = class_create(THIS_MODULE, "fcloop");
|
|
|
|
if (IS_ERR(fcloop_class)) {
|
|
|
|
pr_err("couldn't register class fcloop\n");
|
|
|
|
ret = PTR_ERR(fcloop_class);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
fcloop_device = device_create_with_groups(
|
|
|
|
fcloop_class, NULL, MKDEV(0, 0), NULL,
|
|
|
|
fcloop_dev_attr_groups, "ctl");
|
|
|
|
if (IS_ERR(fcloop_device)) {
|
|
|
|
pr_err("couldn't create ctl device!\n");
|
|
|
|
ret = PTR_ERR(fcloop_device);
|
|
|
|
goto out_destroy_class;
|
|
|
|
}
|
|
|
|
|
|
|
|
get_device(fcloop_device);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
out_destroy_class:
|
|
|
|
class_destroy(fcloop_class);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void __exit fcloop_exit(void)
|
|
|
|
{
|
2020-12-08 04:29:40 +08:00
|
|
|
struct fcloop_lport *lport = NULL;
|
|
|
|
struct fcloop_nport *nport = NULL;
|
2016-12-02 16:28:44 +08:00
|
|
|
struct fcloop_tport *tport;
|
|
|
|
struct fcloop_rport *rport;
|
|
|
|
unsigned long flags;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&fcloop_lock, flags);
|
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
nport = list_first_entry_or_null(&fcloop_nports,
|
|
|
|
typeof(*nport), nport_list);
|
|
|
|
if (!nport)
|
|
|
|
break;
|
|
|
|
|
|
|
|
tport = __unlink_target_port(nport);
|
|
|
|
rport = __unlink_remote_port(nport);
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
|
|
|
|
2017-09-20 05:01:50 +08:00
|
|
|
ret = __targetport_unreg(nport, tport);
|
2016-12-02 16:28:44 +08:00
|
|
|
if (ret)
|
|
|
|
pr_warn("%s: Failed deleting target port\n", __func__);
|
|
|
|
|
2017-09-20 05:01:50 +08:00
|
|
|
ret = __remoteport_unreg(nport, rport);
|
2016-12-02 16:28:44 +08:00
|
|
|
if (ret)
|
|
|
|
pr_warn("%s: Failed deleting remote port\n", __func__);
|
|
|
|
|
|
|
|
spin_lock_irqsave(&fcloop_lock, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
lport = list_first_entry_or_null(&fcloop_lports,
|
|
|
|
typeof(*lport), lport_list);
|
|
|
|
if (!lport)
|
|
|
|
break;
|
|
|
|
|
|
|
|
__unlink_local_port(lport);
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
|
|
|
|
|
|
|
ret = __wait_localport_unreg(lport);
|
|
|
|
if (ret)
|
|
|
|
pr_warn("%s: Failed deleting local port\n", __func__);
|
|
|
|
|
|
|
|
spin_lock_irqsave(&fcloop_lock, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
|
|
|
|
|
|
|
put_device(fcloop_device);
|
|
|
|
|
|
|
|
device_destroy(fcloop_class, MKDEV(0, 0));
|
|
|
|
class_destroy(fcloop_class);
|
|
|
|
}
|
|
|
|
|
|
|
|
module_init(fcloop_init);
|
|
|
|
module_exit(fcloop_exit);
|
|
|
|
|
|
|
|
MODULE_LICENSE("GPL v2");
|