Merge branch '3.1-rc-fixes' of git://linux-iscsi.org/target-pending

* '3.1-rc-fixes' of git://linux-iscsi.org/target-pending:
  iscsi-target: Fix sendpage breakage with proper padding+DataDigest iovec offsets
  iscsi-target: Disable markers + remove dangerous local scope array usage
  target: Skip non hex characters for VPD=0x83 NAA IEEE Registered Extended
  tcm_fc: Work queue based approach instead of managing own thread and event based mechanism
  tcm_fc: Invalidation of DDP context for FCoE target in error conditions
  target: Fix race between multiple invocations of target_qf_do_work()
This commit is contained in:
Linus Torvalds 2011-09-17 16:52:13 -07:00
commit 4be0ed42c5
8 changed files with 105 additions and 382 deletions

View File

@ -1430,7 +1430,7 @@ static int iscsi_enforce_integrity_rules(
u8 DataSequenceInOrder = 0;
u8 ErrorRecoveryLevel = 0, SessionType = 0;
u8 IFMarker = 0, OFMarker = 0;
u8 IFMarkInt_Reject = 0, OFMarkInt_Reject = 0;
u8 IFMarkInt_Reject = 1, OFMarkInt_Reject = 1;
u32 FirstBurstLength = 0, MaxBurstLength = 0;
struct iscsi_param *param = NULL;

View File

@ -874,40 +874,6 @@ void iscsit_inc_session_usage_count(struct iscsi_session *sess)
spin_unlock_bh(&sess->session_usage_lock);
}
/*
* Used before iscsi_do[rx,tx]_data() to determine iov and [rx,tx]_marker
* array counts needed for sync and steering.
*/
static int iscsit_determine_sync_and_steering_counts(
struct iscsi_conn *conn,
struct iscsi_data_count *count)
{
u32 length = count->data_length;
u32 marker, markint;
count->sync_and_steering = 1;
marker = (count->type == ISCSI_RX_DATA) ?
conn->of_marker : conn->if_marker;
markint = (count->type == ISCSI_RX_DATA) ?
(conn->conn_ops->OFMarkInt * 4) :
(conn->conn_ops->IFMarkInt * 4);
count->ss_iov_count = count->iov_count;
while (length > 0) {
if (length >= marker) {
count->ss_iov_count += 3;
count->ss_marker_count += 2;
length -= marker;
marker = markint;
} else
length = 0;
}
return 0;
}
/*
* Setup conn->if_marker and conn->of_marker values based upon
* the initial marker-less interval. (see iSCSI v19 A.2)
@ -1290,7 +1256,7 @@ int iscsit_fe_sendpage_sg(
struct kvec iov;
u32 tx_hdr_size, data_len;
u32 offset = cmd->first_data_sg_off;
int tx_sent;
int tx_sent, iov_off;
send_hdr:
tx_hdr_size = ISCSI_HDR_LEN;
@ -1310,9 +1276,19 @@ send_hdr:
}
data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
if (conn->conn_ops->DataDigest)
/*
* Set iov_off used by padding and data digest tx_data() calls below
* in order to determine proper offset into cmd->iov_data[]
*/
if (conn->conn_ops->DataDigest) {
data_len -= ISCSI_CRC_LEN;
if (cmd->padding)
iov_off = (cmd->iov_data_count - 2);
else
iov_off = (cmd->iov_data_count - 1);
} else {
iov_off = (cmd->iov_data_count - 1);
}
/*
* Perform sendpage() for each page in the scatterlist
*/
@ -1341,8 +1317,7 @@ send_pg:
send_padding:
if (cmd->padding) {
struct kvec *iov_p =
&cmd->iov_data[cmd->iov_data_count-1];
struct kvec *iov_p = &cmd->iov_data[iov_off++];
tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
if (cmd->padding != tx_sent) {
@ -1356,8 +1331,7 @@ send_padding:
send_datacrc:
if (conn->conn_ops->DataDigest) {
struct kvec *iov_d =
&cmd->iov_data[cmd->iov_data_count];
struct kvec *iov_d = &cmd->iov_data[iov_off];
tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
if (ISCSI_CRC_LEN != tx_sent) {
@ -1431,8 +1405,7 @@ static int iscsit_do_rx_data(
struct iscsi_data_count *count)
{
int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
u32 rx_marker_val[count->ss_marker_count], rx_marker_iov = 0;
struct kvec iov[count->ss_iov_count], *iov_p;
struct kvec *iov_p;
struct msghdr msg;
if (!conn || !conn->sock || !conn->conn_ops)
@ -1440,93 +1413,8 @@ static int iscsit_do_rx_data(
memset(&msg, 0, sizeof(struct msghdr));
if (count->sync_and_steering) {
int size = 0;
u32 i, orig_iov_count = 0;
u32 orig_iov_len = 0, orig_iov_loc = 0;
u32 iov_count = 0, per_iov_bytes = 0;
u32 *rx_marker, old_rx_marker = 0;
struct kvec *iov_record;
memset(&rx_marker_val, 0,
count->ss_marker_count * sizeof(u32));
memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
iov_record = count->iov;
orig_iov_count = count->iov_count;
rx_marker = &conn->of_marker;
i = 0;
size = data;
orig_iov_len = iov_record[orig_iov_loc].iov_len;
while (size > 0) {
pr_debug("rx_data: #1 orig_iov_len %u,"
" orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
pr_debug("rx_data: #2 rx_marker %u, size"
" %u\n", *rx_marker, size);
if (orig_iov_len >= *rx_marker) {
iov[iov_count].iov_len = *rx_marker;
iov[iov_count++].iov_base =
(iov_record[orig_iov_loc].iov_base +
per_iov_bytes);
iov[iov_count].iov_len = (MARKER_SIZE / 2);
iov[iov_count++].iov_base =
&rx_marker_val[rx_marker_iov++];
iov[iov_count].iov_len = (MARKER_SIZE / 2);
iov[iov_count++].iov_base =
&rx_marker_val[rx_marker_iov++];
old_rx_marker = *rx_marker;
/*
* OFMarkInt is in 32-bit words.
*/
*rx_marker = (conn->conn_ops->OFMarkInt * 4);
size -= old_rx_marker;
orig_iov_len -= old_rx_marker;
per_iov_bytes += old_rx_marker;
pr_debug("rx_data: #3 new_rx_marker"
" %u, size %u\n", *rx_marker, size);
} else {
iov[iov_count].iov_len = orig_iov_len;
iov[iov_count++].iov_base =
(iov_record[orig_iov_loc].iov_base +
per_iov_bytes);
per_iov_bytes = 0;
*rx_marker -= orig_iov_len;
size -= orig_iov_len;
if (size)
orig_iov_len =
iov_record[++orig_iov_loc].iov_len;
pr_debug("rx_data: #4 new_rx_marker"
" %u, size %u\n", *rx_marker, size);
}
}
data += (rx_marker_iov * (MARKER_SIZE / 2));
iov_p = &iov[0];
iov_len = iov_count;
if (iov_count > count->ss_iov_count) {
pr_err("iov_count: %d, count->ss_iov_count:"
" %d\n", iov_count, count->ss_iov_count);
return -1;
}
if (rx_marker_iov > count->ss_marker_count) {
pr_err("rx_marker_iov: %d, count->ss_marker"
"_count: %d\n", rx_marker_iov,
count->ss_marker_count);
return -1;
}
} else {
iov_p = count->iov;
iov_len = count->iov_count;
}
iov_p = count->iov;
iov_len = count->iov_count;
while (total_rx < data) {
rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
@ -1541,16 +1429,6 @@ static int iscsit_do_rx_data(
rx_loop, total_rx, data);
}
if (count->sync_and_steering) {
int j;
for (j = 0; j < rx_marker_iov; j++) {
pr_debug("rx_data: #5 j: %d, offset: %d\n",
j, rx_marker_val[j]);
conn->of_marker_offset = rx_marker_val[j];
}
total_rx -= (rx_marker_iov * (MARKER_SIZE / 2));
}
return total_rx;
}
@ -1559,8 +1437,7 @@ static int iscsit_do_tx_data(
struct iscsi_data_count *count)
{
int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
u32 tx_marker_val[count->ss_marker_count], tx_marker_iov = 0;
struct kvec iov[count->ss_iov_count], *iov_p;
struct kvec *iov_p;
struct msghdr msg;
if (!conn || !conn->sock || !conn->conn_ops)
@ -1573,98 +1450,8 @@ static int iscsit_do_tx_data(
memset(&msg, 0, sizeof(struct msghdr));
if (count->sync_and_steering) {
int size = 0;
u32 i, orig_iov_count = 0;
u32 orig_iov_len = 0, orig_iov_loc = 0;
u32 iov_count = 0, per_iov_bytes = 0;
u32 *tx_marker, old_tx_marker = 0;
struct kvec *iov_record;
memset(&tx_marker_val, 0,
count->ss_marker_count * sizeof(u32));
memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
iov_record = count->iov;
orig_iov_count = count->iov_count;
tx_marker = &conn->if_marker;
i = 0;
size = data;
orig_iov_len = iov_record[orig_iov_loc].iov_len;
while (size > 0) {
pr_debug("tx_data: #1 orig_iov_len %u,"
" orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
pr_debug("tx_data: #2 tx_marker %u, size"
" %u\n", *tx_marker, size);
if (orig_iov_len >= *tx_marker) {
iov[iov_count].iov_len = *tx_marker;
iov[iov_count++].iov_base =
(iov_record[orig_iov_loc].iov_base +
per_iov_bytes);
tx_marker_val[tx_marker_iov] =
(size - *tx_marker);
iov[iov_count].iov_len = (MARKER_SIZE / 2);
iov[iov_count++].iov_base =
&tx_marker_val[tx_marker_iov++];
iov[iov_count].iov_len = (MARKER_SIZE / 2);
iov[iov_count++].iov_base =
&tx_marker_val[tx_marker_iov++];
old_tx_marker = *tx_marker;
/*
* IFMarkInt is in 32-bit words.
*/
*tx_marker = (conn->conn_ops->IFMarkInt * 4);
size -= old_tx_marker;
orig_iov_len -= old_tx_marker;
per_iov_bytes += old_tx_marker;
pr_debug("tx_data: #3 new_tx_marker"
" %u, size %u\n", *tx_marker, size);
pr_debug("tx_data: #4 offset %u\n",
tx_marker_val[tx_marker_iov-1]);
} else {
iov[iov_count].iov_len = orig_iov_len;
iov[iov_count++].iov_base
= (iov_record[orig_iov_loc].iov_base +
per_iov_bytes);
per_iov_bytes = 0;
*tx_marker -= orig_iov_len;
size -= orig_iov_len;
if (size)
orig_iov_len =
iov_record[++orig_iov_loc].iov_len;
pr_debug("tx_data: #5 new_tx_marker"
" %u, size %u\n", *tx_marker, size);
}
}
data += (tx_marker_iov * (MARKER_SIZE / 2));
iov_p = &iov[0];
iov_len = iov_count;
if (iov_count > count->ss_iov_count) {
pr_err("iov_count: %d, count->ss_iov_count:"
" %d\n", iov_count, count->ss_iov_count);
return -1;
}
if (tx_marker_iov > count->ss_marker_count) {
pr_err("tx_marker_iov: %d, count->ss_marker"
"_count: %d\n", tx_marker_iov,
count->ss_marker_count);
return -1;
}
} else {
iov_p = count->iov;
iov_len = count->iov_count;
}
iov_p = count->iov;
iov_len = count->iov_count;
while (total_tx < data) {
tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
@ -1679,9 +1466,6 @@ static int iscsit_do_tx_data(
tx_loop, total_tx, data);
}
if (count->sync_and_steering)
total_tx -= (tx_marker_iov * (MARKER_SIZE / 2));
return total_tx;
}
@ -1702,12 +1486,6 @@ int rx_data(
c.data_length = data;
c.type = ISCSI_RX_DATA;
if (conn->conn_ops->OFMarker &&
(conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
return -1;
}
return iscsit_do_rx_data(conn, &c);
}
@ -1728,12 +1506,6 @@ int tx_data(
c.data_length = data;
c.type = ISCSI_TX_DATA;
if (conn->conn_ops->IFMarker &&
(conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
return -1;
}
return iscsit_do_tx_data(conn, &c);
}

View File

@ -24,6 +24,7 @@
*/
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
@ -154,6 +155,37 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
static void
target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf_off)
{
unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
unsigned char *buf = buf_off;
int cnt = 0, next = 1;
/*
* Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
* byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
* format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
* to complete the payload. These are based from VPD=0x80 PRODUCT SERIAL
* NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
* per device uniqueness.
*/
while (*p != '\0') {
if (cnt >= 13)
break;
if (!isxdigit(*p)) {
p++;
continue;
}
if (next != 0) {
buf[cnt++] |= hex_to_bin(*p++);
next = 0;
} else {
buf[cnt] = hex_to_bin(*p++) << 4;
next = 1;
}
}
}
/*
* Device identification VPD, for a complete list of
* DESIGNATOR TYPEs see spc4r17 Table 459.
@ -219,8 +251,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
* VENDOR_SPECIFIC_IDENTIFIER and
* VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
*/
buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]);
hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12);
target_parse_naa_6h_vendor_specific(dev, &buf[off]);
len = 20;
off = (len + 4);

View File

@ -977,15 +977,17 @@ static void target_qf_do_work(struct work_struct *work)
{
struct se_device *dev = container_of(work, struct se_device,
qf_work_queue);
LIST_HEAD(qf_cmd_list);
struct se_cmd *cmd, *cmd_tmp;
spin_lock_irq(&dev->qf_cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) {
list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
spin_unlock_irq(&dev->qf_cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
list_del(&cmd->se_qf_node);
atomic_dec(&dev->dev_qf_count);
smp_mb__after_atomic_dec();
spin_unlock_irq(&dev->qf_cmd_lock);
pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
@ -997,10 +999,7 @@ static void target_qf_do_work(struct work_struct *work)
* has been added to head of queue
*/
transport_add_cmd_to_queue(cmd, cmd->t_state);
spin_lock_irq(&dev->qf_cmd_lock);
}
spin_unlock_irq(&dev->qf_cmd_lock);
}
unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)

View File

@ -98,8 +98,7 @@ struct ft_tpg {
struct list_head list; /* linkage in ft_lport_acl tpg_list */
struct list_head lun_list; /* head of LUNs */
struct se_portal_group se_tpg;
struct task_struct *thread; /* processing thread */
struct se_queue_obj qobj; /* queue for processing thread */
struct workqueue_struct *workqueue;
};
struct ft_lport_acl {
@ -110,16 +109,10 @@ struct ft_lport_acl {
struct se_wwn fc_lport_wwn;
};
enum ft_cmd_state {
FC_CMD_ST_NEW = 0,
FC_CMD_ST_REJ
};
/*
* Commands
*/
struct ft_cmd {
enum ft_cmd_state state;
u32 lun; /* LUN from request */
struct ft_sess *sess; /* session held for cmd */
struct fc_seq *seq; /* sequence in exchange mgr */
@ -127,7 +120,7 @@ struct ft_cmd {
struct fc_frame *req_frame;
unsigned char *cdb; /* pointer to CDB inside frame */
u32 write_data_len; /* data received on writes */
struct se_queue_req se_req;
struct work_struct work;
/* Local sense buffer */
unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];
u32 was_ddp_setup:1; /* Set only if ddp is setup */
@ -177,7 +170,6 @@ int ft_is_state_remove(struct se_cmd *);
/*
* other internal functions.
*/
int ft_thread(void *);
void ft_recv_req(struct ft_sess *, struct fc_frame *);
struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);

View File

@ -62,8 +62,8 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
int count;
se_cmd = &cmd->se_cmd;
pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd);
pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n",
caller, cmd, cmd->sess, cmd->seq, se_cmd);
pr_debug("%s: cmd %p cdb %p\n",
caller, cmd, cmd->cdb);
pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
@ -90,38 +90,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
}
static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
{
struct ft_tpg *tpg = sess->tport->tpg;
struct se_queue_obj *qobj = &tpg->qobj;
unsigned long flags;
qobj = &sess->tport->tpg->qobj;
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
atomic_inc(&qobj->queue_cnt);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
wake_up_process(tpg->thread);
}
static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj)
{
unsigned long flags;
struct se_queue_req *qr;
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
if (list_empty(&qobj->qobj_list)) {
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
return NULL;
}
qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list);
list_del(&qr->qr_list);
atomic_dec(&qobj->queue_cnt);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
return container_of(qr, struct ft_cmd, se_req);
}
static void ft_free_cmd(struct ft_cmd *cmd)
{
struct fc_frame *fp;
@ -282,9 +250,7 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
int ft_get_cmd_state(struct se_cmd *se_cmd)
{
struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
return cmd->state;
return 0;
}
int ft_is_state_remove(struct se_cmd *se_cmd)
@ -505,6 +471,8 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd)
return 0;
}
static void ft_send_work(struct work_struct *work);
/*
* Handle incoming FCP command.
*/
@ -523,7 +491,9 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
goto busy;
}
cmd->req_frame = fp; /* hold frame during cmd */
ft_queue_cmd(sess, cmd);
INIT_WORK(&cmd->work, ft_send_work);
queue_work(sess->tport->tpg->workqueue, &cmd->work);
return;
busy:
@ -563,12 +533,13 @@ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
/*
* Send new command to target.
*/
static void ft_send_cmd(struct ft_cmd *cmd)
static void ft_send_work(struct work_struct *work)
{
struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
struct se_cmd *se_cmd;
struct fcp_cmnd *fcp;
int data_dir;
int data_dir = 0;
u32 data_len;
int task_attr;
int ret;
@ -675,42 +646,3 @@ static void ft_send_cmd(struct ft_cmd *cmd)
err:
ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
}
/*
* Handle request in the command thread.
*/
static void ft_exec_req(struct ft_cmd *cmd)
{
pr_debug("cmd state %x\n", cmd->state);
switch (cmd->state) {
case FC_CMD_ST_NEW:
ft_send_cmd(cmd);
break;
default:
break;
}
}
/*
* Processing thread.
* Currently one thread per tpg.
*/
int ft_thread(void *arg)
{
struct ft_tpg *tpg = arg;
struct se_queue_obj *qobj = &tpg->qobj;
struct ft_cmd *cmd;
while (!kthread_should_stop()) {
schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
if (kthread_should_stop())
goto out;
cmd = ft_dequeue_cmd(qobj);
if (cmd)
ft_exec_req(cmd);
}
out:
return 0;
}

View File

@ -327,7 +327,6 @@ static struct se_portal_group *ft_add_tpg(
tpg->index = index;
tpg->lport_acl = lacl;
INIT_LIST_HEAD(&tpg->lun_list);
transport_init_queue_obj(&tpg->qobj);
ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
tpg, TRANSPORT_TPG_TYPE_NORMAL);
@ -336,8 +335,8 @@ static struct se_portal_group *ft_add_tpg(
return NULL;
}
tpg->thread = kthread_run(ft_thread, tpg, "ft_tpg%lu", index);
if (IS_ERR(tpg->thread)) {
tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1);
if (!tpg->workqueue) {
kfree(tpg);
return NULL;
}
@ -356,7 +355,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
pr_debug("del tpg %s\n",
config_item_name(&tpg->se_tpg.tpg_group.cg_item));
kthread_stop(tpg->thread);
destroy_workqueue(tpg->workqueue);
/* Wait for sessions to be freed thru RCU, for BUG_ON below */
synchronize_rcu();

View File

@ -219,43 +219,41 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
if (cmd->was_ddp_setup) {
BUG_ON(!ep);
BUG_ON(!lport);
}
/*
* Doesn't expect payload if DDP is setup. Payload
* is expected to be copied directly to user buffers
* due to DDP (Large Rx offload),
*/
buf = fc_frame_payload_get(fp, 1);
if (buf)
pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
/*
* Since DDP (Large Rx offload) was setup for this request,
* payload is expected to be copied directly to user buffers.
*/
buf = fc_frame_payload_get(fp, 1);
if (buf)
pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
"cmd->sg_cnt 0x%x. DDP was setup"
" hence not expected to receive frame with "
"payload, Frame will be dropped if "
"'Sequence Initiative' bit in f_ctl is "
"payload, Frame will be dropped if "
"'Sequence Initiative' bit in f_ctl is "
"not set\n", __func__, ep->xid, f_ctl,
cmd->sg, cmd->sg_cnt);
/*
* Invalidate HW DDP context if it was setup for respective
* command. Invalidation of HW DDP context is required in both
* situation (success and error).
*/
ft_invl_hw_context(cmd);
/*
* Invalidate HW DDP context if it was setup for respective
* command. Invalidation of HW DDP context is required in both
* situation (success and error).
*/
ft_invl_hw_context(cmd);
/*
* If "Sequence Initiative (TSI)" bit set in f_ctl, means last
* write data frame is received successfully where payload is
* posted directly to user buffer and only the last frame's
* header is posted in receive queue.
*
* If "Sequence Initiative (TSI)" bit is not set, means error
* condition w.r.t. DDP, hence drop the packet and let explicit
* ABORTS from other end of exchange timer trigger the recovery.
*/
if (f_ctl & FC_FC_SEQ_INIT)
goto last_frame;
else
goto drop;
/*
* If "Sequence Initiative (TSI)" bit set in f_ctl, means last
* write data frame is received successfully where payload is
* posted directly to user buffer and only the last frame's
* header is posted in receive queue.
*
* If "Sequence Initiative (TSI)" bit is not set, means error
* condition w.r.t. DDP, hence drop the packet and let explicit
* ABORTS from other end of exchange timer trigger the recovery.
*/
if (f_ctl & FC_FC_SEQ_INIT)
goto last_frame;
else
goto drop;
}
rel_off = ntohl(fh->fh_parm_offset);
frame_len = fr_len(fp);