Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target updates from Nicholas Bellinger:
 "Lots of activity this round on performance improvements in target-core
  while benchmarking the prototype scsi-mq initiator code with
  vhost-scsi fabric ports, along with a number of iscsi/iser-target
  improvements and hardening fixes for exception path cases post v3.10
  merge.

  The highlights include:

   - Make persistent reservations APTPL buffer allocated on-demand, and
     drop per t10_reservation buffer.  (grover)
   - Make virtual LUN=0 a NULLIO device, and skip allocation of NULLIO
     device pages (grover)
   - Add transport_cmd_check_stop write_pending bit to avoid extra
     access of ->t_state_lock in the WRITE I/O submission fast-path.  (nab)
   - Drop unnecessary CMD_T_DEV_ACTIVE check from
     transport_lun_remove_cmd to avoid extra access of ->t_state_lock in
     release fast-path.  (nab)
   - Avoid extra t_state_lock access in __target_execute_cmd fast-path
     (nab)
   - Drop unnecessary vhost-scsi wait_for_tasks=true usage +
     ->t_state_lock access in release fast-path.  (nab)
   - Convert vhost-scsi to use modern se_cmd->cmd_kref
     TARGET_SCF_ACK_KREF usage (nab)
   - Add tracepoints for SCSI commands being processed (roland)
   - Refactoring of iscsi-target handling of ISCSI_OP_NOOP +
     ISCSI_OP_TEXT to be transport independent (nab)
   - Add iscsi-target SendTargets=$IQN support for in-band discovery
     (nab)
   - Add iser-target support for in-band discovery (nab + Or)
   - Add iscsi-target demo-mode TPG authentication context support (nab)
   - Fix isert_put_reject payload buffer post (nab)
   - Fix iscsit_add_reject* usage for iser (nab)
   - Fix iscsit_sequence_cmd reject handling for iser (nab)
   - Fix ISCSI_OP_SCSI_TMFUNC handling for iser (nab)
   - Fix session reset bug with RDMA_CM_EVENT_DISCONNECTED (nab)

  The last five iscsi/iser-target items are CC'ed to stable, as they do
  address issues present in v3.10 code.  They are certainly larger than
  I'd like for a stable patch set, but they are important to ensure proper
  REJECT exception handling in iser-target for 3.10.y"
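
A quick sketch of the caller-visible side of those stable-bound reject fixes, condensed from the iscsit_sequence_cmd() and isert_handle_scsi_cmd() hunks further down (trimmed to the main path, so not the complete error handling): iscsit_sequence_cmd() now takes the raw PDU header and queues the REJECT itself via iscsit_reject_cmd(), instead of leaving every caller to build one.

    /* Before: each caller had to post the CmdSN reject on its own */
    rc = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
    if (rc == CMDSN_ERROR_CANNOT_RECOVER)
            return iscsit_add_reject_from_cmd(
                            ISCSI_REASON_PROTOCOL_ERROR,
                            1, 0, (unsigned char *)hdr, cmd);

    /* After: pass the raw PDU in; on an unrecoverable CmdSN the reject is
     * queued internally through iscsit_reject_cmd(cmd, reason, buf) */
    rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
    if (!rc && dump_payload == false && unsol_data)
            iscsit_set_unsoliticed_dataout(cmd);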

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (51 commits)
  iser-target: Ignore non TEXT + LOGOUT opcodes for discovery
  target: make queue_tm_rsp() return void
  target: remove unused codes from enum tcm_tmrsp_table
  iscsi-target: kstrtou* configfs attribute parameter cleanups
  iscsi-target: Fix tfc_tpg_auth_cit configfs length overflow
  iscsi-target: Fix tfc_tpg_nacl_auth_cit configfs length overflow
  iser-target: Add support for ISCSI_OP_TEXT opcode + payload handling
  iser-target: Rename sense_buf_[dma,len] to pdu_[dma,len]
  iser-target: Add vendor_err debug output
  target: Add (obsolete) checking for PMI/LBA fields in READ CAPACITY(10)
  target: Return correct sense data for IO past the end of a device
  target: Add tracepoints for SCSI commands being processed
  iser-target: Fix session reset bug with RDMA_CM_EVENT_DISCONNECTED
  iscsi-target: Fix ISCSI_OP_SCSI_TMFUNC handling for iser
  iscsi-target: Fix iscsit_sequence_cmd reject handling for iser
  iscsi-target: Fix iscsit_add_reject* usage for iser
  iser-target: Fix isert_put_reject payload buffer post
  iscsi-target: missing kfree() on error path
  iscsi-target: Drop left-over iscsi_conn->bad_hdr
  target: Make core_scsi3_update_and_write_aptpl return sense_reason_t
  ...
Linus Torvalds 2013-07-11 12:57:19 -07:00
commit 6d2fa9e141
36 changed files with 1415 additions and 1086 deletions
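
One API change from the shortlog is worth flagging before the hunks: "target: make queue_tm_rsp() return void" drops the return value from the fabric-ops TMR callback, so drivers that had routed several callbacks through a single int-returning helper grow thin wrappers instead. Condensed from the ib_srpt hunks below (a trimmed sketch, not the full driver):

    static void srpt_queue_response(struct se_cmd *cmd);    /* was: static int */

    static int srpt_queue_data_in(struct se_cmd *cmd)
    {
            srpt_queue_response(cmd);       /* errors handled internally now */
            return 0;
    }

    static void srpt_queue_tm_rsp(struct se_cmd *cmd)
    {
            srpt_queue_response(cmd);
    }

    static struct target_core_fabric_ops srpt_template = {
            /* ... */
            .queue_data_in  = srpt_queue_data_in,   /* both formerly pointed */
            .queue_tm_rsp   = srpt_queue_tm_rsp,    /* straight at srpt_queue_response */
            /* ... */
    };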

--- next changed file ---

@ -388,6 +388,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
init_waitqueue_head(&isert_conn->conn_wait_comp_err);
kref_init(&isert_conn->conn_kref);
kref_get(&isert_conn->conn_kref);
mutex_init(&isert_conn->conn_mutex);
cma_id->context = isert_conn;
isert_conn->conn_cm_id = cma_id;
@ -540,15 +541,32 @@ isert_disconnect_work(struct work_struct *work)
struct isert_conn, conn_logout_work);
pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
mutex_lock(&isert_conn->conn_mutex);
isert_conn->state = ISER_CONN_DOWN;
if (isert_conn->post_recv_buf_count == 0 &&
atomic_read(&isert_conn->post_send_buf_count) == 0) {
pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
wake_up(&isert_conn->conn_wait);
mutex_unlock(&isert_conn->conn_mutex);
goto wake_up;
}
if (!isert_conn->conn_cm_id) {
mutex_unlock(&isert_conn->conn_mutex);
isert_put_conn(isert_conn);
return;
}
if (!isert_conn->logout_posted) {
pr_debug("Calling rdma_disconnect for !logout_posted from"
" isert_disconnect_work\n");
rdma_disconnect(isert_conn->conn_cm_id);
mutex_unlock(&isert_conn->conn_mutex);
iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
goto wake_up;
}
mutex_unlock(&isert_conn->conn_mutex);
wake_up:
wake_up(&isert_conn->conn_wait);
isert_put_conn(isert_conn);
}
@ -934,16 +952,11 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
}
sequence_cmd:
rc = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
if (!rc && dump_payload == false && unsol_data)
iscsit_set_unsoliticed_dataout(cmd);
if (rc == CMDSN_ERROR_CANNOT_RECOVER)
return iscsit_add_reject_from_cmd(
ISCSI_REASON_PROTOCOL_ERROR,
1, 0, (unsigned char *)hdr, cmd);
return 0;
}
@ -1000,6 +1013,52 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
return 0;
}
static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
struct iser_rx_desc *rx_desc, unsigned char *buf)
{
struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
struct iscsi_conn *conn = isert_conn->conn;
struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
int rc;
rc = iscsit_setup_nop_out(conn, cmd, hdr);
if (rc < 0)
return rc;
/*
* FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
*/
return iscsit_process_nop_out(conn, cmd, hdr);
}
static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
struct iser_rx_desc *rx_desc, struct iscsi_text *hdr)
{
struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
struct iscsi_conn *conn = isert_conn->conn;
u32 payload_length = ntoh24(hdr->dlength);
int rc;
unsigned char *text_in;
rc = iscsit_setup_text_cmd(conn, cmd, hdr);
if (rc < 0)
return rc;
text_in = kzalloc(payload_length, GFP_KERNEL);
if (!text_in) {
pr_err("Unable to allocate text_in of payload_length: %u\n",
payload_length);
return -ENOMEM;
}
cmd->text_in_ptr = text_in;
memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);
return iscsit_process_text_cmd(conn, cmd, hdr);
}
static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
uint32_t read_stag, uint64_t read_va,
@ -1007,11 +1066,19 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
{
struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
struct iscsi_conn *conn = isert_conn->conn;
struct iscsi_session *sess = conn->sess;
struct iscsi_cmd *cmd;
struct isert_cmd *isert_cmd;
int ret = -EINVAL;
u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
if (sess->sess_ops->SessionType &&
(!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
" ignoring\n", opcode);
return 0;
}
switch (opcode) {
case ISCSI_OP_SCSI_CMD:
cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
@ -1032,7 +1099,9 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
if (!cmd)
break;
ret = iscsit_handle_nop_out(conn, cmd, (unsigned char *)hdr);
isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd);
ret = isert_handle_nop_out(isert_conn, isert_cmd,
rx_desc, (unsigned char *)hdr);
break;
case ISCSI_OP_SCSI_DATA_OUT:
ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
@ -1057,6 +1126,15 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
SECONDS_FOR_LOGOUT_COMP *
HZ);
break;
case ISCSI_OP_TEXT:
cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
if (!cmd)
break;
isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd);
ret = isert_handle_text_cmd(isert_conn, isert_cmd,
rx_desc, (struct iscsi_text *)hdr);
break;
default:
pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
dump_stack();
@ -1184,14 +1262,12 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
{
struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
struct isert_conn *isert_conn = isert_cmd->conn;
struct iscsi_conn *conn;
struct iscsi_conn *conn = isert_conn->conn;
pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);
switch (cmd->iscsi_opcode) {
case ISCSI_OP_SCSI_CMD:
conn = isert_conn->conn;
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
list_del(&cmd->i_conn_node);
@ -1201,16 +1277,19 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
iscsit_stop_dataout_timer(cmd);
isert_unmap_cmd(isert_cmd, isert_conn);
/*
* Fall-through
*/
transport_generic_free_cmd(&cmd->se_cmd, 0);
break;
case ISCSI_OP_SCSI_TMFUNC:
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
list_del(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
transport_generic_free_cmd(&cmd->se_cmd, 0);
break;
case ISCSI_OP_REJECT:
case ISCSI_OP_NOOP_OUT:
conn = isert_conn->conn;
case ISCSI_OP_TEXT:
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
list_del(&cmd->i_conn_node);
@ -1222,6 +1301,9 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
* associated cmd->se_cmd needs to be released.
*/
if (cmd->se_cmd.se_tfo != NULL) {
pr_debug("Calling transport_generic_free_cmd from"
" isert_put_cmd for 0x%02x\n",
cmd->iscsi_opcode);
transport_generic_free_cmd(&cmd->se_cmd, 0);
break;
}
@ -1249,11 +1331,11 @@ static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
struct ib_device *ib_dev)
{
if (isert_cmd->sense_buf_dma != 0) {
pr_debug("Calling ib_dma_unmap_single for isert_cmd->sense_buf_dma\n");
ib_dma_unmap_single(ib_dev, isert_cmd->sense_buf_dma,
isert_cmd->sense_buf_len, DMA_TO_DEVICE);
isert_cmd->sense_buf_dma = 0;
if (isert_cmd->pdu_buf_dma != 0) {
pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
isert_cmd->pdu_buf_dma = 0;
}
isert_unmap_tx_desc(tx_desc, ib_dev);
@ -1318,8 +1400,8 @@ isert_do_control_comp(struct work_struct *work)
atomic_dec(&isert_conn->post_send_buf_count);
cmd->i_state = ISTATE_SENT_STATUS;
complete(&cmd->reject_comp);
isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
break;
case ISTATE_SEND_LOGOUTRSP:
pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
/*
@ -1329,6 +1411,11 @@ isert_do_control_comp(struct work_struct *work)
isert_conn->logout_posted = true;
iscsit_logout_post_handler(cmd, cmd->conn);
break;
case ISTATE_SEND_TEXTRSP:
atomic_dec(&isert_conn->post_send_buf_count);
cmd->i_state = ISTATE_SENT_STATUS;
isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
break;
default:
pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
dump_stack();
@ -1345,7 +1432,9 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
cmd->i_state == ISTATE_SEND_LOGOUTRSP) {
cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
cmd->i_state == ISTATE_SEND_REJECT ||
cmd->i_state == ISTATE_SEND_TEXTRSP) {
isert_unmap_tx_desc(tx_desc, ib_dev);
INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
@ -1419,7 +1508,11 @@ isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
pr_debug("Calling wake_up from isert_cq_comp_err\n");
isert_conn->state = ISER_CONN_TERMINATING;
mutex_lock(&isert_conn->conn_mutex);
if (isert_conn->state != ISER_CONN_DOWN)
isert_conn->state = ISER_CONN_TERMINATING;
mutex_unlock(&isert_conn->conn_mutex);
wake_up(&isert_conn->conn_wait_comp_err);
}
}
@ -1445,6 +1538,7 @@ isert_cq_tx_work(struct work_struct *work)
} else {
pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
pr_debug("TX wc.status: 0x%08x\n", wc.status);
pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
atomic_dec(&isert_conn->post_send_buf_count);
isert_cq_comp_err(tx_desc, isert_conn);
}
@ -1484,9 +1578,11 @@ isert_cq_rx_work(struct work_struct *work)
isert_rx_completion(rx_desc, isert_conn, xfer_len);
} else {
pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
if (wc.status != IB_WC_WR_FLUSH_ERR)
if (wc.status != IB_WC_WR_FLUSH_ERR) {
pr_debug("RX wc.status: 0x%08x\n", wc.status);
pr_debug("RX wc.vendor_err: 0x%08x\n",
wc.vendor_err);
}
isert_conn->post_recv_buf_count--;
isert_cq_comp_err(NULL, isert_conn);
}
@ -1543,7 +1639,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
(cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
u32 padding, sense_len;
u32 padding, pdu_len;
put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
cmd->sense_buffer);
@ -1551,15 +1647,15 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
padding = -(cmd->se_cmd.scsi_sense_length) & 3;
hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
sense_len = cmd->se_cmd.scsi_sense_length + padding;
pdu_len = cmd->se_cmd.scsi_sense_length + padding;
isert_cmd->sense_buf_dma = ib_dma_map_single(ib_dev,
(void *)cmd->sense_buffer, sense_len,
isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
(void *)cmd->sense_buffer, pdu_len,
DMA_TO_DEVICE);
isert_cmd->sense_buf_len = sense_len;
tx_dsg->addr = isert_cmd->sense_buf_dma;
tx_dsg->length = sense_len;
isert_cmd->pdu_buf_len = pdu_len;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
tx_dsg->length = pdu_len;
tx_dsg->lkey = isert_conn->conn_mr->lkey;
isert_cmd->tx_desc.num_sge = 2;
}
@ -1637,11 +1733,25 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
struct isert_cmd, iscsi_cmd);
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
struct iscsi_reject *hdr =
(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
iscsit_build_reject(cmd, conn, (struct iscsi_reject *)
&isert_cmd->tx_desc.iscsi_header);
iscsit_build_reject(cmd, conn, hdr);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
hton24(hdr->dlength, ISCSI_HDR_LEN);
isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
DMA_TO_DEVICE);
isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
tx_dsg->length = ISCSI_HDR_LEN;
tx_dsg->lkey = isert_conn->conn_mr->lkey;
isert_cmd->tx_desc.num_sge = 2;
isert_init_send_wr(isert_cmd, send_wr);
pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
@ -1649,6 +1759,47 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct isert_cmd *isert_cmd = container_of(cmd,
struct isert_cmd, iscsi_cmd);
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
struct iscsi_text_rsp *hdr =
(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
u32 txt_rsp_len;
int rc;
isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
rc = iscsit_build_text_rsp(cmd, conn, hdr);
if (rc < 0)
return rc;
txt_rsp_len = rc;
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
if (txt_rsp_len) {
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
void *txt_rsp_buf = cmd->buf_ptr;
isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
isert_cmd->pdu_buf_len = txt_rsp_len;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
tx_dsg->length = txt_rsp_len;
tx_dsg->lkey = isert_conn->conn_mr->lkey;
isert_cmd->tx_desc.num_sge = 2;
}
isert_init_send_wr(isert_cmd, send_wr);
pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
@ -1947,6 +2098,9 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
case ISTATE_SEND_REJECT:
ret = isert_put_reject(cmd, conn);
break;
case ISTATE_SEND_TEXTRSP:
ret = isert_put_text_rsp(cmd, conn);
break;
case ISTATE_SEND_STATUS:
/*
* Special case for sending non GOOD SCSI status from TX thread
@ -2175,6 +2329,17 @@ isert_free_np(struct iscsi_np *np)
kfree(isert_np);
}
static int isert_check_state(struct isert_conn *isert_conn, int state)
{
int ret;
mutex_lock(&isert_conn->conn_mutex);
ret = (isert_conn->state == state);
mutex_unlock(&isert_conn->conn_mutex);
return ret;
}
static void isert_free_conn(struct iscsi_conn *conn)
{
struct isert_conn *isert_conn = conn->context;
@ -2184,26 +2349,43 @@ static void isert_free_conn(struct iscsi_conn *conn)
* Decrement post_send_buf_count for special case when called
* from isert_do_control_comp() -> iscsit_logout_post_handler()
*/
mutex_lock(&isert_conn->conn_mutex);
if (isert_conn->logout_posted)
atomic_dec(&isert_conn->post_send_buf_count);
if (isert_conn->conn_cm_id)
if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
pr_debug("Calling rdma_disconnect from isert_free_conn\n");
rdma_disconnect(isert_conn->conn_cm_id);
}
/*
* Only wait for conn_wait_comp_err if the isert_conn made it
* into full feature phase..
*/
if (isert_conn->state > ISER_CONN_INIT) {
if (isert_conn->state == ISER_CONN_UP) {
pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
isert_conn->state);
wait_event(isert_conn->conn_wait_comp_err,
isert_conn->state == ISER_CONN_TERMINATING);
pr_debug("isert_free_conn: After wait_event #1 >>>>>>>>>>>>\n");
}
mutex_unlock(&isert_conn->conn_mutex);
pr_debug("isert_free_conn: wait_event conn_wait %d\n", isert_conn->state);
wait_event(isert_conn->conn_wait, isert_conn->state == ISER_CONN_DOWN);
pr_debug("isert_free_conn: After wait_event #2 >>>>>>>>>>>>>>>>>>>>\n");
wait_event(isert_conn->conn_wait_comp_err,
(isert_check_state(isert_conn, ISER_CONN_TERMINATING)));
wait_event(isert_conn->conn_wait,
(isert_check_state(isert_conn, ISER_CONN_DOWN)));
isert_put_conn(isert_conn);
return;
}
if (isert_conn->state == ISER_CONN_INIT) {
mutex_unlock(&isert_conn->conn_mutex);
isert_put_conn(isert_conn);
return;
}
pr_debug("isert_free_conn: wait_event conn_wait %d\n",
isert_conn->state);
mutex_unlock(&isert_conn->conn_mutex);
wait_event(isert_conn->conn_wait,
(isert_check_state(isert_conn, ISER_CONN_DOWN)));
isert_put_conn(isert_conn);
}

--- next changed file ---

@ -61,8 +61,8 @@ struct isert_cmd {
uint32_t write_stag;
uint64_t read_va;
uint64_t write_va;
u64 sense_buf_dma;
u32 sense_buf_len;
u64 pdu_buf_dma;
u32 pdu_buf_len;
u32 read_va_off;
u32 write_va_off;
u32 rdma_wr_num;
@ -102,6 +102,7 @@ struct isert_conn {
struct ib_qp *conn_qp;
struct isert_device *conn_device;
struct work_struct conn_logout_work;
struct mutex conn_mutex;
wait_queue_head_t conn_wait;
wait_queue_head_t conn_wait_comp_err;
struct kref conn_kref;

--- next changed file ---

@ -3011,7 +3011,7 @@ static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
* Callback function called by the TCM core. Must not block since it can be
* invoked on the context of the IB completion handler.
*/
static int srpt_queue_response(struct se_cmd *cmd)
static void srpt_queue_response(struct se_cmd *cmd)
{
struct srpt_rdma_ch *ch;
struct srpt_send_ioctx *ioctx;
@ -3022,8 +3022,6 @@ static int srpt_queue_response(struct se_cmd *cmd)
int resp_len;
u8 srp_tm_status;
ret = 0;
ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
ch = ioctx->ch;
BUG_ON(!ch);
@ -3049,7 +3047,7 @@ static int srpt_queue_response(struct se_cmd *cmd)
|| WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) {
atomic_inc(&ch->req_lim_delta);
srpt_abort_cmd(ioctx);
goto out;
return;
}
dir = ioctx->cmd.data_direction;
@ -3061,7 +3059,7 @@ static int srpt_queue_response(struct se_cmd *cmd)
if (ret) {
printk(KERN_ERR "xfer_data failed for tag %llu\n",
ioctx->tag);
goto out;
return;
}
}
@ -3082,9 +3080,17 @@ static int srpt_queue_response(struct se_cmd *cmd)
srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
}
}
out:
return ret;
static int srpt_queue_data_in(struct se_cmd *cmd)
{
srpt_queue_response(cmd);
return 0;
}
static void srpt_queue_tm_rsp(struct se_cmd *cmd)
{
srpt_queue_response(cmd);
}
static int srpt_queue_status(struct se_cmd *cmd)
@ -3097,7 +3103,8 @@ static int srpt_queue_status(struct se_cmd *cmd)
(SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
ioctx->queue_status_only = true;
return srpt_queue_response(cmd);
srpt_queue_response(cmd);
return 0;
}
static void srpt_refresh_port_work(struct work_struct *work)
@ -3930,9 +3937,9 @@ static struct target_core_fabric_ops srpt_template = {
.set_default_node_attributes = srpt_set_default_node_attrs,
.get_task_tag = srpt_get_task_tag,
.get_cmd_state = srpt_get_tcm_cmd_state,
.queue_data_in = srpt_queue_response,
.queue_data_in = srpt_queue_data_in,
.queue_status = srpt_queue_status,
.queue_tm_rsp = srpt_queue_response,
.queue_tm_rsp = srpt_queue_tm_rsp,
/*
* Setup function pointers for generic logic in
* target_core_fabric_configfs.c

--- next changed file ---

@ -544,102 +544,6 @@ out_free_id_list:
return res;
}
static bool qlt_check_fcport_exist(struct scsi_qla_host *vha,
struct qla_tgt_sess *sess)
{
struct qla_hw_data *ha = vha->hw;
struct qla_port_24xx_data *pmap24;
bool res, found = false;
int rc, i;
uint16_t loop_id = 0xFFFF; /* to eliminate compiler's warning */
uint16_t entries;
void *pmap;
int pmap_len;
fc_port_t *fcport;
int global_resets;
unsigned long flags;
retry:
global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
rc = qla2x00_get_node_name_list(vha, &pmap, &pmap_len);
if (rc != QLA_SUCCESS) {
res = false;
goto out;
}
pmap24 = pmap;
entries = pmap_len/sizeof(*pmap24);
for (i = 0; i < entries; ++i) {
if (!memcmp(sess->port_name, pmap24[i].port_name, WWN_SIZE)) {
loop_id = le16_to_cpu(pmap24[i].loop_id);
found = true;
break;
}
}
kfree(pmap);
if (!found) {
res = false;
goto out;
}
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf046,
"qlt_check_fcport_exist(): loop_id %d", loop_id);
fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
if (fcport == NULL) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf047,
"qla_target(%d): Allocation of tmp FC port failed",
vha->vp_idx);
res = false;
goto out;
}
fcport->loop_id = loop_id;
rc = qla2x00_get_port_database(vha, fcport, 0);
if (rc != QLA_SUCCESS) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf048,
"qla_target(%d): Failed to retrieve fcport "
"information -- get_port_database() returned %x "
"(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
res = false;
goto out_free_fcport;
}
if (global_resets !=
atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
"qla_target(%d): global reset during session discovery"
" (counter was %d, new %d), retrying",
vha->vp_idx, global_resets,
atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
goto retry;
}
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
"Updating sess %p s_id %x:%x:%x, loop_id %d) to d_id %x:%x:%x, "
"loop_id %d", sess, sess->s_id.b.domain, sess->s_id.b.al_pa,
sess->s_id.b.area, sess->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id);
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
(fcport->flags & FCF_CONF_COMP_SUPPORTED));
spin_unlock_irqrestore(&ha->hardware_lock, flags);
res = true;
out_free_fcport:
kfree(fcport);
out:
return res;
}
/* ha->hardware_lock supposed to be held on entry */
static void qlt_undelete_sess(struct qla_tgt_sess *sess)
{
@ -663,43 +567,13 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
del_list_entry);
if (time_after_eq(jiffies, sess->expires)) {
bool cancel;
qlt_undelete_sess(sess);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
cancel = qlt_check_fcport_exist(vha, sess);
if (cancel) {
if (sess->deleted) {
/*
* sess was again deleted while we were
* discovering it
*/
spin_lock_irqsave(&ha->hardware_lock,
flags);
continue;
}
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf049,
"qla_target(%d): cancel deletion of "
"session for port %02x:%02x:%02x:%02x:%02x:"
"%02x:%02x:%02x (loop ID %d), because "
" it isn't deleted by firmware",
vha->vp_idx, sess->port_name[0],
sess->port_name[1], sess->port_name[2],
sess->port_name[3], sess->port_name[4],
sess->port_name[5], sess->port_name[6],
sess->port_name[7], sess->loop_id);
} else {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
"Timeout: sess %p about to be deleted\n",
sess);
ha->tgt.tgt_ops->shutdown_sess(sess);
ha->tgt.tgt_ops->put_sess(sess);
}
spin_lock_irqsave(&ha->hardware_lock, flags);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
"Timeout: sess %p about to be deleted\n",
sess);
ha->tgt.tgt_ops->shutdown_sess(sess);
ha->tgt.tgt_ops->put_sess(sess);
} else {
schedule_delayed_work(&tgt->sess_del_work,
jiffies - sess->expires);
@ -884,9 +758,8 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
sess->loop_id);
sess->local = 0;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
@ -2706,7 +2579,9 @@ static void qlt_do_work(struct work_struct *work)
/*
* Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
*/
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return;
out_term:
@ -2718,9 +2593,9 @@ out_term:
spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
kmem_cache_free(qla_tgt_cmd_cachep, cmd);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (sess)
ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/* ha->hardware_lock supposed to be held on entry */
@ -4169,16 +4044,16 @@ static void qlt_abort_work(struct qla_tgt *tgt,
rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
if (rc != 0)
goto out_term;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return;
out_term:
qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (sess)
ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void qlt_tmr_work(struct qla_tgt *tgt,
@ -4226,16 +4101,16 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
if (rc != 0)
goto out_term;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return;
out_term:
qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (sess)
ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void qlt_sess_work_fn(struct work_struct *work)

--- next changed file ---

@ -703,7 +703,7 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
}
static int tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
{
struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
@ -735,8 +735,6 @@ static int tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
* CTIO response packet.
*/
qlt_xmit_tm_rsp(mcmd);
return 0;
}
/* Local pointer to allocated TCM configfs fabric module */
@ -799,12 +797,14 @@ static void tcm_qla2xxx_put_session(struct se_session *se_sess)
static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
{
tcm_qla2xxx_put_session(sess->se_sess);
assert_spin_locked(&sess->vha->hw->hardware_lock);
kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session);
}
static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
{
tcm_qla2xxx_shutdown_session(sess->se_sess);
assert_spin_locked(&sess->vha->hw->hardware_lock);
target_sess_cmd_list_set_waiting(sess->se_sess);
}
static struct se_node_acl *tcm_qla2xxx_make_nodeacl(

File diff suppressed because it is too large.

--- next changed file ---

@ -15,7 +15,7 @@ extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *,
extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
struct iscsi_portal_group *);
extern int iscsit_del_np(struct iscsi_np *);
extern int iscsit_add_reject_from_cmd(u8, int, int, unsigned char *, struct iscsi_cmd *);
extern int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8, unsigned char *);
extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *);
extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *);
extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *);

--- next changed file ---

@ -20,6 +20,7 @@
****************************************************************************/
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/export.h>
#include <linux/inet.h>
#include <target/target_core_base.h>
@ -78,11 +79,12 @@ static ssize_t lio_target_np_store_sctp(
struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
struct iscsi_tpg_np, se_tpg_np);
struct iscsi_tpg_np *tpg_np_sctp = NULL;
char *endptr;
u32 op;
int ret;
op = simple_strtoul(page, &endptr, 0);
ret = kstrtou32(page, 0, &op);
if (ret)
return ret;
if ((op != 1) && (op != 0)) {
pr_err("Illegal value for tpg_enable: %u\n", op);
return -EINVAL;
@ -382,11 +384,12 @@ static ssize_t iscsi_nacl_attrib_store_##name( \
{ \
struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
se_node_acl); \
char *endptr; \
u32 val; \
int ret; \
\
val = simple_strtoul(page, &endptr, 0); \
ret = kstrtou32(page, 0, &val); \
if (ret) \
return ret; \
ret = iscsit_na_##name(nacl, val); \
if (ret < 0) \
return ret; \
@ -474,7 +477,7 @@ static ssize_t __iscsi_##prefix##_store_##name( \
if (!capable(CAP_SYS_ADMIN)) \
return -EPERM; \
\
snprintf(auth->name, PAGE_SIZE, "%s", page); \
snprintf(auth->name, sizeof(auth->name), "%s", page); \
if (!strncmp("NULL", auth->name, 4)) \
auth->naf_flags &= ~flags; \
else \
@ -789,11 +792,12 @@ static ssize_t lio_target_nacl_store_cmdsn_depth(
struct iscsi_portal_group *tpg = container_of(se_tpg,
struct iscsi_portal_group, tpg_se_tpg);
struct config_item *acl_ci, *tpg_ci, *wwn_ci;
char *endptr;
u32 cmdsn_depth = 0;
int ret;
cmdsn_depth = simple_strtoul(page, &endptr, 0);
ret = kstrtou32(page, 0, &cmdsn_depth);
if (ret)
return ret;
if (cmdsn_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) {
pr_err("Passed cmdsn_depth: %u exceeds"
" TA_DEFAULT_CMDSN_DEPTH_MAX: %u\n", cmdsn_depth,
@ -977,14 +981,15 @@ static ssize_t iscsi_tpg_attrib_store_##name( \
{ \
struct iscsi_portal_group *tpg = container_of(se_tpg, \
struct iscsi_portal_group, tpg_se_tpg); \
char *endptr; \
u32 val; \
int ret; \
\
if (iscsit_get_tpg(tpg) < 0) \
return -EINVAL; \
\
val = simple_strtoul(page, &endptr, 0); \
ret = kstrtou32(page, 0, &val); \
if (ret) \
goto out; \
ret = iscsit_ta_##name(tpg, val); \
if (ret < 0) \
goto out; \
@ -1053,6 +1058,131 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
/* End items for lio_target_tpg_attrib_cit */
/* Start items for lio_target_tpg_auth_cit */
#define __DEF_TPG_AUTH_STR(prefix, name, flags) \
static ssize_t __iscsi_##prefix##_show_##name( \
struct se_portal_group *se_tpg, \
char *page) \
{ \
struct iscsi_portal_group *tpg = container_of(se_tpg, \
struct iscsi_portal_group, tpg_se_tpg); \
struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \
\
if (!capable(CAP_SYS_ADMIN)) \
return -EPERM; \
\
return snprintf(page, PAGE_SIZE, "%s\n", auth->name); \
} \
\
static ssize_t __iscsi_##prefix##_store_##name( \
struct se_portal_group *se_tpg, \
const char *page, \
size_t count) \
{ \
struct iscsi_portal_group *tpg = container_of(se_tpg, \
struct iscsi_portal_group, tpg_se_tpg); \
struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \
\
if (!capable(CAP_SYS_ADMIN)) \
return -EPERM; \
\
snprintf(auth->name, sizeof(auth->name), "%s", page); \
if (!(strncmp("NULL", auth->name, 4))) \
auth->naf_flags &= ~flags; \
else \
auth->naf_flags |= flags; \
\
if ((auth->naf_flags & NAF_USERID_IN_SET) && \
(auth->naf_flags & NAF_PASSWORD_IN_SET)) \
auth->authenticate_target = 1; \
else \
auth->authenticate_target = 0; \
\
return count; \
}
#define __DEF_TPG_AUTH_INT(prefix, name) \
static ssize_t __iscsi_##prefix##_show_##name( \
struct se_portal_group *se_tpg, \
char *page) \
{ \
struct iscsi_portal_group *tpg = container_of(se_tpg, \
struct iscsi_portal_group, tpg_se_tpg); \
struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \
\
if (!capable(CAP_SYS_ADMIN)) \
return -EPERM; \
\
return snprintf(page, PAGE_SIZE, "%d\n", auth->name); \
}
#define DEF_TPG_AUTH_STR(name, flags) \
__DEF_TPG_AUTH_STR(tpg_auth, name, flags) \
static ssize_t iscsi_tpg_auth_show_##name( \
struct se_portal_group *se_tpg, \
char *page) \
{ \
return __iscsi_tpg_auth_show_##name(se_tpg, page); \
} \
\
static ssize_t iscsi_tpg_auth_store_##name( \
struct se_portal_group *se_tpg, \
const char *page, \
size_t count) \
{ \
return __iscsi_tpg_auth_store_##name(se_tpg, page, count); \
}
#define DEF_TPG_AUTH_INT(name) \
__DEF_TPG_AUTH_INT(tpg_auth, name) \
static ssize_t iscsi_tpg_auth_show_##name( \
struct se_portal_group *se_tpg, \
char *page) \
{ \
return __iscsi_tpg_auth_show_##name(se_tpg, page); \
}
#define TPG_AUTH_ATTR(_name, _mode) TF_TPG_AUTH_ATTR(iscsi, _name, _mode);
#define TPG_AUTH_ATTR_RO(_name) TF_TPG_AUTH_ATTR_RO(iscsi, _name);
/*
* * One-way authentication userid
* */
DEF_TPG_AUTH_STR(userid, NAF_USERID_SET);
TPG_AUTH_ATTR(userid, S_IRUGO | S_IWUSR);
/*
* * One-way authentication password
* */
DEF_TPG_AUTH_STR(password, NAF_PASSWORD_SET);
TPG_AUTH_ATTR(password, S_IRUGO | S_IWUSR);
/*
* * Enforce mutual authentication
* */
DEF_TPG_AUTH_INT(authenticate_target);
TPG_AUTH_ATTR_RO(authenticate_target);
/*
* * Mutual authentication userid
* */
DEF_TPG_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
TPG_AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR);
/*
* * Mutual authentication password
* */
DEF_TPG_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
TPG_AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR);
static struct configfs_attribute *lio_target_tpg_auth_attrs[] = {
&iscsi_tpg_auth_userid.attr,
&iscsi_tpg_auth_password.attr,
&iscsi_tpg_auth_authenticate_target.attr,
&iscsi_tpg_auth_userid_mutual.attr,
&iscsi_tpg_auth_password_mutual.attr,
NULL,
};
/* End items for lio_target_tpg_auth_cit */
/* Start items for lio_target_tpg_param_cit */
#define DEF_TPG_PARAM(name) \
@ -1087,13 +1217,14 @@ static ssize_t iscsi_tpg_param_store_##name( \
struct iscsi_portal_group *tpg = container_of(se_tpg, \
struct iscsi_portal_group, tpg_se_tpg); \
char *buf; \
int ret; \
int ret, len; \
\
buf = kzalloc(PAGE_SIZE, GFP_KERNEL); \
if (!buf) \
return -ENOMEM; \
snprintf(buf, PAGE_SIZE, "%s=%s", __stringify(name), page); \
buf[strlen(buf)-1] = '\0'; /* Kill newline */ \
len = snprintf(buf, PAGE_SIZE, "%s=%s", __stringify(name), page); \
if (isspace(buf[len-1])) \
buf[len-1] = '\0'; /* Kill newline */ \
\
if (iscsit_get_tpg(tpg) < 0) { \
kfree(buf); \
@ -1230,11 +1361,12 @@ static ssize_t lio_target_tpg_store_enable(
{
struct iscsi_portal_group *tpg = container_of(se_tpg,
struct iscsi_portal_group, tpg_se_tpg);
char *endptr;
u32 op;
int ret = 0;
int ret;
op = simple_strtoul(page, &endptr, 0);
ret = kstrtou32(page, 0, &op);
if (ret)
return ret;
if ((op != 1) && (op != 0)) {
pr_err("Illegal value for tpg_enable: %u\n", op);
return -EINVAL;
@ -1282,15 +1414,15 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
{
struct iscsi_portal_group *tpg;
struct iscsi_tiqn *tiqn;
char *tpgt_str, *end_ptr;
int ret = 0;
unsigned short int tpgt;
char *tpgt_str;
int ret;
u16 tpgt;
tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
/*
* Only tpgt_# directory groups can be created below
* target/iscsi/iqn.superturodiskarry/
*/
*/
tpgt_str = strstr(name, "tpgt_");
if (!tpgt_str) {
pr_err("Unable to locate \"tpgt_#\" directory"
@ -1298,7 +1430,9 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
return NULL;
}
tpgt_str += 5; /* Skip ahead of "tpgt_" */
tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
ret = kstrtou16(tpgt_str, 0, &tpgt);
if (ret)
return NULL;
tpg = iscsit_alloc_portal_group(tiqn, tpgt);
if (!tpg)
@ -1506,10 +1640,12 @@ static ssize_t iscsi_disc_store_enforce_discovery_auth(
{
struct iscsi_param *param;
struct iscsi_portal_group *discovery_tpg = iscsit_global->discovery_tpg;
char *endptr;
u32 op;
int err;
op = simple_strtoul(page, &endptr, 0);
err = kstrtou32(page, 0, &op);
if (err)
return -EINVAL;
if ((op != 1) && (op != 0)) {
pr_err("Illegal value for enforce_discovery_auth:"
" %u\n", op);
@ -1655,13 +1791,12 @@ static int lio_queue_status(struct se_cmd *se_cmd)
return 0;
}
static int lio_queue_tm_rsp(struct se_cmd *se_cmd)
static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
{
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
cmd->i_state = ISTATE_SEND_TASKMGTRSP;
iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
return 0;
}
static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
@ -1866,6 +2001,7 @@ int iscsi_target_register_configfs(void)
TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;

--- next changed file ---

@ -132,7 +132,8 @@ enum cmd_flags_table {
ICF_CONTIG_MEMORY = 0x00000020,
ICF_ATTACHED_TO_RQUEUE = 0x00000040,
ICF_OOO_CMDSN = 0x00000080,
ICF_REJECT_FAIL_CONN = 0x00000100,
IFC_SENDTARGETS_ALL = 0x00000100,
IFC_SENDTARGETS_SINGLE = 0x00000200,
};
/* struct iscsi_cmd->i_state */
@ -366,6 +367,8 @@ struct iscsi_cmd {
u8 maxcmdsn_inc;
/* Immediate Unsolicited Dataout */
u8 unsolicited_data;
/* Reject reason code */
u8 reject_reason;
/* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */
u16 logout_cid;
/* Command flags */
@ -427,6 +430,8 @@ struct iscsi_cmd {
u32 tx_size;
/* Buffer used for various purposes */
void *buf_ptr;
/* Used by SendTargets=[iqn.,eui.] discovery */
void *text_in_ptr;
/* See include/linux/dma-mapping.h */
enum dma_data_direction data_direction;
/* iSCSI PDU Header + CRC */
@ -446,7 +451,6 @@ struct iscsi_cmd {
struct list_head datain_list;
/* R2T List */
struct list_head cmd_r2t_list;
struct completion reject_comp;
/* Timer for DataOUT */
struct timer_list dataout_timer;
/* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */
@ -528,8 +532,6 @@ struct iscsi_conn {
u32 of_marker;
/* Used for calculating OFMarker offset to next PDU */
u32 of_marker_offset;
/* Complete Bad PDU for sending reject */
unsigned char bad_hdr[ISCSI_HDR_LEN];
#define IPV6_ADDRESS_SPACE 48
unsigned char login_ip[IPV6_ADDRESS_SPACE];
unsigned char local_ip[IPV6_ADDRESS_SPACE];
@ -809,6 +811,7 @@ struct iscsi_portal_group {
struct mutex tpg_access_lock;
struct mutex np_login_lock;
struct iscsi_tpg_attrib tpg_attrib;
struct iscsi_node_auth tpg_demo_auth;
/* Pointer to default list of iSCSI parameters for TPG */
struct iscsi_param_list *param_list;
struct iscsi_tiqn *tpg_tiqn;

--- next changed file ---

@ -746,13 +746,12 @@ int iscsit_check_post_dataout(
if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
pr_err("Unable to recover from DataOUT CRC"
" failure while ERL=0, closing session.\n");
iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
1, 0, buf, cmd);
iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
buf);
return DATAOUT_CANNOT_RECOVER;
}
iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
0, 0, buf, cmd);
iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR, buf);
return iscsit_dataout_post_crc_failed(cmd, buf);
}
}
@ -909,6 +908,7 @@ void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
wait_for_completion(&conn->conn_wait_comp);
complete(&conn->conn_post_wait_comp);
}
EXPORT_SYMBOL(iscsit_cause_connection_reinstatement);
void iscsit_fall_back_to_erl0(struct iscsi_session *sess)
{

--- next changed file ---

@ -162,9 +162,8 @@ static int iscsit_handle_r2t_snack(
" protocol error.\n", cmd->init_task_tag, begrun,
(begrun + runlength), cmd->acked_data_sn);
return iscsit_add_reject_from_cmd(
ISCSI_REASON_PROTOCOL_ERROR,
1, 0, buf, cmd);
return iscsit_reject_cmd(cmd,
ISCSI_REASON_PROTOCOL_ERROR, buf);
}
if (runlength) {
@ -173,8 +172,8 @@ static int iscsit_handle_r2t_snack(
" with BegRun: 0x%08x, RunLength: 0x%08x, exceeds"
" current R2TSN: 0x%08x, protocol error.\n",
cmd->init_task_tag, begrun, runlength, cmd->r2t_sn);
return iscsit_add_reject_from_cmd(
ISCSI_REASON_BOOKMARK_INVALID, 1, 0, buf, cmd);
return iscsit_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_INVALID, buf);
}
last_r2tsn = (begrun + runlength);
} else
@ -433,8 +432,7 @@ static int iscsit_handle_recovery_datain(
" protocol error.\n", cmd->init_task_tag, begrun,
(begrun + runlength), cmd->acked_data_sn);
return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
1, 0, buf, cmd);
return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
}
/*
@ -445,14 +443,14 @@ static int iscsit_handle_recovery_datain(
pr_err("Initiator requesting BegRun: 0x%08x, RunLength"
": 0x%08x greater than maximum DataSN: 0x%08x.\n",
begrun, runlength, (cmd->data_sn - 1));
return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
1, 0, buf, cmd);
return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID,
buf);
}
dr = iscsit_allocate_datain_req();
if (!dr)
return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1, 0, buf, cmd);
return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_NO_RESOURCES,
buf);
dr->data_sn = dr->begrun = begrun;
dr->runlength = runlength;
@ -1090,7 +1088,7 @@ int iscsit_handle_ooo_cmdsn(
ooo_cmdsn = iscsit_allocate_ooo_cmdsn();
if (!ooo_cmdsn)
return CMDSN_ERROR_CANNOT_RECOVER;
return -ENOMEM;
ooo_cmdsn->cmd = cmd;
ooo_cmdsn->batch_count = (batch) ?
@ -1101,10 +1099,10 @@ int iscsit_handle_ooo_cmdsn(
if (iscsit_attach_ooo_cmdsn(sess, ooo_cmdsn) < 0) {
kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
return CMDSN_ERROR_CANNOT_RECOVER;
return -ENOMEM;
}
return CMDSN_HIGHER_THAN_EXP;
return 0;
}
static int iscsit_set_dataout_timeout_values(

--- next changed file ---

@ -112,6 +112,7 @@ static u32 iscsi_handle_authentication(
struct iscsi_session *sess = conn->sess;
struct iscsi_node_auth *auth;
struct iscsi_node_acl *iscsi_nacl;
struct iscsi_portal_group *iscsi_tpg;
struct se_node_acl *se_nacl;
if (!sess->sess_ops->SessionType) {
@ -132,7 +133,17 @@ static u32 iscsi_handle_authentication(
return -1;
}
auth = ISCSI_NODE_AUTH(iscsi_nacl);
if (se_nacl->dynamic_node_acl) {
iscsi_tpg = container_of(se_nacl->se_tpg,
struct iscsi_portal_group, tpg_se_tpg);
auth = &iscsi_tpg->tpg_demo_auth;
} else {
iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
se_node_acl);
auth = ISCSI_NODE_AUTH(iscsi_nacl);
}
} else {
/*
* For SessionType=Discovery

--- next changed file ---

@ -1799,9 +1799,6 @@ void iscsi_set_connection_parameters(
* this key is not sent over the wire.
*/
if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) {
if (param_list->iser == true)
continue;
ops->MaxXmitDataSegmentLength =
simple_strtoul(param->value, &tmpptr, 0);
pr_debug("MaxXmitDataSegmentLength: %s\n",

--- next changed file ---

@ -178,7 +178,6 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
INIT_LIST_HEAD(&cmd->i_conn_node);
INIT_LIST_HEAD(&cmd->datain_list);
INIT_LIST_HEAD(&cmd->cmd_r2t_list);
init_completion(&cmd->reject_comp);
spin_lock_init(&cmd->datain_lock);
spin_lock_init(&cmd->dataout_timeout_lock);
spin_lock_init(&cmd->istate_lock);
@ -284,13 +283,12 @@ static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cm
* Commands may be received out of order if MC/S is in use.
* Ensure they are executed in CmdSN order.
*/
int iscsit_sequence_cmd(
struct iscsi_conn *conn,
struct iscsi_cmd *cmd,
__be32 cmdsn)
int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
unsigned char *buf, __be32 cmdsn)
{
int ret;
int cmdsn_ret;
int ret, cmdsn_ret;
bool reject = false;
u8 reason = ISCSI_REASON_BOOKMARK_NO_RESOURCES;
mutex_lock(&conn->sess->cmdsn_mutex);
@ -300,9 +298,19 @@ int iscsit_sequence_cmd(
ret = iscsit_execute_cmd(cmd, 0);
if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
iscsit_execute_ooo_cmdsns(conn->sess);
else if (ret < 0) {
reject = true;
ret = CMDSN_ERROR_CANNOT_RECOVER;
}
break;
case CMDSN_HIGHER_THAN_EXP:
ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, be32_to_cpu(cmdsn));
if (ret < 0) {
reject = true;
ret = CMDSN_ERROR_CANNOT_RECOVER;
break;
}
ret = CMDSN_HIGHER_THAN_EXP;
break;
case CMDSN_LOWER_THAN_EXP:
cmd->i_state = ISTATE_REMOVE;
@ -310,11 +318,16 @@ int iscsit_sequence_cmd(
ret = cmdsn_ret;
break;
default:
reason = ISCSI_REASON_PROTOCOL_ERROR;
reject = true;
ret = cmdsn_ret;
break;
}
mutex_unlock(&conn->sess->cmdsn_mutex);
if (reject)
iscsit_reject_cmd(cmd, reason, buf);
return ret;
}
EXPORT_SYMBOL(iscsit_sequence_cmd);
@ -681,6 +694,7 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
kfree(cmd->seq_list);
kfree(cmd->tmr_req);
kfree(cmd->iov_data);
kfree(cmd->text_in_ptr);
kmem_cache_free(lio_cmd_cache, cmd);
}

--- next changed file ---

@ -13,7 +13,8 @@ extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, __be32 cmdsn);
extern int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
unsigned char * ,__be32 cmdsn);
extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *);
extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *,

--- next changed file ---

@ -786,7 +786,7 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
return 0;
}
static int tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
@ -796,7 +796,6 @@ static int tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
*/
atomic_set(&tl_tmr->tmr_complete, 1);
wake_up(&tl_tmr->tl_tmr_wait);
return 0;
}
static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)

--- next changed file ---

@ -1842,9 +1842,8 @@ static int sbp_queue_status(struct se_cmd *se_cmd)
return sbp_send_sense(req);
}
static int sbp_queue_tm_rsp(struct se_cmd *se_cmd)
static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
{
return 0;
}
static int sbp_check_stop_free(struct se_cmd *se_cmd)

--- next changed file ---

@ -983,7 +983,6 @@ static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
struct se_node_acl *se_nacl;
struct t10_pr_registration *pr_reg;
char i_buf[PR_REG_ISID_ID_LEN];
int prf_isid;
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
@ -992,12 +991,11 @@ static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
return sprintf(page, "No SPC-3 Reservation holder\n");
se_nacl = pr_reg->pr_reg_nacl;
prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
se_nacl->initiatorname, i_buf);
}
static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
@ -1116,7 +1114,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
unsigned char buf[384];
char i_buf[PR_REG_ISID_ID_LEN];
ssize_t len = 0;
int reg_count = 0, prf_isid;
int reg_count = 0;
len += sprintf(page+len, "SPC-3 PR Registrations:\n");
@ -1127,12 +1125,11 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
memset(buf, 0, 384);
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
core_pr_dump_initiator_port(pr_reg, i_buf,
PR_REG_ISID_ID_LEN);
sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
tfo->get_fabric_name(),
pr_reg->pr_reg_nacl->initiatorname, (prf_isid) ?
&i_buf[0] : "", pr_reg->pr_res_key,
pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key,
pr_reg->pr_res_generation);
if (len + strlen(buf) >= PAGE_SIZE)

--- next changed file ---

@ -1410,7 +1410,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
dev->t10_wwn.t10_dev = dev;
dev->t10_alua.t10_dev = dev;
@ -1545,7 +1544,7 @@ int core_dev_setup_virtual_lun0(void)
{
struct se_hba *hba;
struct se_device *dev;
char buf[16];
char buf[] = "rd_pages=8,rd_nullio=1";
int ret;
hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
@ -1558,8 +1557,6 @@ int core_dev_setup_virtual_lun0(void)
goto out_free_hba;
}
memset(buf, 0, 16);
sprintf(buf, "rd_pages=8");
hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));
ret = target_configure_device(dev);

--- next changed file ---

@ -965,6 +965,19 @@ TF_CIT_SETUP(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL, NULL);
/* End of tfc_tpg_attrib_cit */
/* Start of tfc_tpg_auth_cit */
CONFIGFS_EATTR_OPS(target_fabric_tpg_auth, se_portal_group, tpg_auth_group);
static struct configfs_item_operations target_fabric_tpg_auth_item_ops = {
.show_attribute = target_fabric_tpg_auth_attr_show,
.store_attribute = target_fabric_tpg_auth_attr_store,
};
TF_CIT_SETUP(tpg_auth, &target_fabric_tpg_auth_item_ops, NULL, NULL);
/* End of tfc_tpg_attrib_cit */
/* Start of tfc_tpg_param_cit */
CONFIGFS_EATTR_OPS(target_fabric_tpg_param, se_portal_group, tpg_param_group);
@ -1030,8 +1043,9 @@ static struct config_group *target_fabric_make_tpg(
se_tpg->tpg_group.default_groups[1] = &se_tpg->tpg_np_group;
se_tpg->tpg_group.default_groups[2] = &se_tpg->tpg_acl_group;
se_tpg->tpg_group.default_groups[3] = &se_tpg->tpg_attrib_group;
se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_param_group;
se_tpg->tpg_group.default_groups[5] = NULL;
se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_auth_group;
se_tpg->tpg_group.default_groups[5] = &se_tpg->tpg_param_group;
se_tpg->tpg_group.default_groups[6] = NULL;
config_group_init_type_name(&se_tpg->tpg_group, name,
&TF_CIT_TMPL(tf)->tfc_tpg_base_cit);
@ -1043,6 +1057,8 @@ static struct config_group *target_fabric_make_tpg(
&TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit);
config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
&TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit);
config_group_init_type_name(&se_tpg->tpg_auth_group, "auth",
&TF_CIT_TMPL(tf)->tfc_tpg_auth_cit);
config_group_init_type_name(&se_tpg->tpg_param_group, "param",
&TF_CIT_TMPL(tf)->tfc_tpg_param_cit);
@ -1202,6 +1218,7 @@ int target_fabric_setup_cits(struct target_fabric_configfs *tf)
target_fabric_setup_tpg_np_cit(tf);
target_fabric_setup_tpg_np_base_cit(tf);
target_fabric_setup_tpg_attrib_cit(tf);
target_fabric_setup_tpg_auth_cit(tf);
target_fabric_setup_tpg_param_cit(tf);
target_fabric_setup_tpg_nacl_cit(tf);
target_fabric_setup_tpg_nacl_base_cit(tf);

--- next changed file ---

@ -53,18 +53,28 @@ struct pr_transport_id_holder {
struct list_head dest_list;
};
int core_pr_dump_initiator_port(
void core_pr_dump_initiator_port(
struct t10_pr_registration *pr_reg,
char *buf,
u32 size)
{
if (!pr_reg->isid_present_at_reg)
return 0;
buf[0] = '\0';
snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]);
return 1;
snprintf(buf, size, ",i,0x%s", pr_reg->pr_reg_isid);
}
enum register_type {
REGISTER,
REGISTER_AND_IGNORE_EXISTING_KEY,
REGISTER_AND_MOVE,
};
enum preempt_type {
PREEMPT,
PREEMPT_AND_ABORT,
};
static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
struct t10_pr_registration *, int);
@ -596,14 +606,6 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
return NULL;
}
pr_reg->pr_aptpl_buf = kzalloc(dev->t10_pr.pr_aptpl_buf_len,
GFP_ATOMIC);
if (!pr_reg->pr_aptpl_buf) {
pr_err("Unable to allocate pr_reg->pr_aptpl_buf\n");
kmem_cache_free(t10_pr_reg_cache, pr_reg);
return NULL;
}
INIT_LIST_HEAD(&pr_reg->pr_reg_list);
INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list);
INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list);
@ -794,7 +796,6 @@ int core_scsi3_alloc_aptpl_registration(
pr_err("Unable to allocate struct t10_pr_registration\n");
return -ENOMEM;
}
pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL);
INIT_LIST_HEAD(&pr_reg->pr_reg_list);
INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list);
@ -848,11 +849,9 @@ static void core_scsi3_aptpl_reserve(
struct t10_pr_registration *pr_reg)
{
char i_buf[PR_REG_ISID_ID_LEN];
int prf_isid;
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
spin_lock(&dev->dev_reservation_lock);
dev->dev_pr_res_holder = pr_reg;
@ -865,11 +864,11 @@ static void core_scsi3_aptpl_reserve(
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
tpg->se_tpg_tfo->get_fabric_name(), node_acl->initiatorname,
(prf_isid) ? &i_buf[0] : "");
i_buf);
}
static void __core_scsi3_add_registration(struct se_device *, struct se_node_acl *,
struct t10_pr_registration *, int, int);
struct t10_pr_registration *, enum register_type, int);
static int __core_scsi3_check_aptpl_registration(
struct se_device *dev,
@ -962,21 +961,19 @@ static void __core_scsi3_dump_registration(
struct se_device *dev,
struct se_node_acl *nacl,
struct t10_pr_registration *pr_reg,
int register_type)
enum register_type register_type)
{
struct se_portal_group *se_tpg = nacl->se_tpg;
char i_buf[PR_REG_ISID_ID_LEN];
int prf_isid;
memset(&i_buf[0], 0, PR_REG_ISID_ID_LEN);
prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
" Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ?
"_AND_MOVE" : (register_type == 1) ?
" Node: %s%s\n", tfo->get_fabric_name(), (register_type == REGISTER_AND_MOVE) ?
"_AND_MOVE" : (register_type == REGISTER_AND_IGNORE_EXISTING_KEY) ?
"_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname,
(prf_isid) ? i_buf : "");
i_buf);
pr_debug("SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg),
tfo->tpg_get_tag(se_tpg));
@ -998,7 +995,7 @@ static void __core_scsi3_add_registration(
struct se_device *dev,
struct se_node_acl *nacl,
struct t10_pr_registration *pr_reg,
int register_type,
enum register_type register_type,
int register_move)
{
struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
@ -1064,7 +1061,7 @@ static int core_scsi3_alloc_registration(
u64 sa_res_key,
int all_tg_pt,
int aptpl,
int register_type,
enum register_type register_type,
int register_move)
{
struct t10_pr_registration *pr_reg;
@ -1225,11 +1222,9 @@ static void __core_scsi3_free_registration(
pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
char i_buf[PR_REG_ISID_ID_LEN];
int prf_isid;
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
pr_reg->pr_reg_deve->def_pr_registered = 0;
pr_reg->pr_reg_deve->pr_res_key = 0;
@ -1257,7 +1252,7 @@ static void __core_scsi3_free_registration(
pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
" Node: %s%s\n", tfo->get_fabric_name(),
pr_reg->pr_reg_nacl->initiatorname,
(prf_isid) ? &i_buf[0] : "");
i_buf);
pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
" Port(s)\n", tfo->get_fabric_name(),
(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
@ -1269,7 +1264,6 @@ static void __core_scsi3_free_registration(
if (!preempt_and_abort_list) {
pr_reg->pr_reg_deve = NULL;
pr_reg->pr_reg_nacl = NULL;
kfree(pr_reg->pr_aptpl_buf);
kmem_cache_free(t10_pr_reg_cache, pr_reg);
return;
}
@ -1338,7 +1332,6 @@ void core_scsi3_free_all_registrations(
list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
pr_reg_aptpl_list) {
list_del(&pr_reg->pr_reg_aptpl_list);
kfree(pr_reg->pr_aptpl_buf);
kmem_cache_free(t10_pr_reg_cache, pr_reg);
}
spin_unlock(&pr_tmpl->aptpl_reg_lock);
@ -1453,7 +1446,7 @@ core_scsi3_decode_spec_i_port(
char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
sense_reason_t ret;
u32 tpdl, tid_len = 0;
int dest_local_nexus, prf_isid;
int dest_local_nexus;
u32 dest_rtpi = 0;
memset(dest_iport, 0, 64);
@ -1764,8 +1757,7 @@ core_scsi3_decode_spec_i_port(
kfree(tidh);
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0],
PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(dest_pr_reg, i_buf, PR_REG_ISID_ID_LEN);
__core_scsi3_add_registration(cmd->se_dev, dest_node_acl,
dest_pr_reg, 0, 0);
@ -1773,8 +1765,7 @@ core_scsi3_decode_spec_i_port(
pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully"
" registered Transport ID for Node: %s%s Mapped LUN:"
" %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(),
dest_node_acl->initiatorname, (prf_isid) ?
&i_buf[0] : "", dest_se_deve->mapped_lun);
dest_node_acl->initiatorname, i_buf, dest_se_deve->mapped_lun);
if (dest_local_nexus)
continue;
@ -1813,7 +1804,6 @@ out:
kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp);
}
kfree(dest_pr_reg->pr_aptpl_buf);
kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
if (dest_local_nexus)
@ -1826,14 +1816,10 @@ out:
return ret;
}
/*
* Called with struct se_device->dev_reservation_lock held
*/
static int __core_scsi3_update_aptpl_buf(
static int core_scsi3_update_aptpl_buf(
struct se_device *dev,
unsigned char *buf,
u32 pr_aptpl_buf_len,
int clear_aptpl_metadata)
u32 pr_aptpl_buf_len)
{
struct se_lun *lun;
struct se_portal_group *tpg;
@ -1841,20 +1827,13 @@ static int __core_scsi3_update_aptpl_buf(
unsigned char tmp[512], isid_buf[32];
ssize_t len = 0;
int reg_count = 0;
int ret = 0;
memset(buf, 0, pr_aptpl_buf_len);
/*
* Called to clear metadata once APTPL has been deactivated.
*/
if (clear_aptpl_metadata) {
snprintf(buf, pr_aptpl_buf_len,
"No Registrations or Reservations\n");
return 0;
}
spin_lock(&dev->dev_reservation_lock);
spin_lock(&dev->t10_pr.registration_lock);
/*
* Walk the registration list..
*/
spin_lock(&dev->t10_pr.registration_lock);
list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
pr_reg_list) {
@ -1900,8 +1879,8 @@ static int __core_scsi3_update_aptpl_buf(
if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
pr_err("Unable to update renaming"
" APTPL metadata\n");
spin_unlock(&dev->t10_pr.registration_lock);
return -EMSGSIZE;
ret = -EMSGSIZE;
goto out;
}
len += sprintf(buf+len, "%s", tmp);
@ -1918,48 +1897,32 @@ static int __core_scsi3_update_aptpl_buf(
if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
pr_err("Unable to update renaming"
" APTPL metadata\n");
spin_unlock(&dev->t10_pr.registration_lock);
return -EMSGSIZE;
ret = -EMSGSIZE;
goto out;
}
len += sprintf(buf+len, "%s", tmp);
reg_count++;
}
spin_unlock(&dev->t10_pr.registration_lock);
if (!reg_count)
len += sprintf(buf+len, "No Registrations or Reservations");
return 0;
}
static int core_scsi3_update_aptpl_buf(
struct se_device *dev,
unsigned char *buf,
u32 pr_aptpl_buf_len,
int clear_aptpl_metadata)
{
int ret;
spin_lock(&dev->dev_reservation_lock);
ret = __core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
clear_aptpl_metadata);
out:
spin_unlock(&dev->t10_pr.registration_lock);
spin_unlock(&dev->dev_reservation_lock);
return ret;
}
/*
* Called with struct se_device->aptpl_file_mutex held
*/
static int __core_scsi3_write_aptpl_to_file(
struct se_device *dev,
unsigned char *buf,
u32 pr_aptpl_buf_len)
unsigned char *buf)
{
struct t10_wwn *wwn = &dev->t10_wwn;
struct file *file;
int flags = O_RDWR | O_CREAT | O_TRUNC;
char path[512];
u32 pr_aptpl_buf_len;
int ret;
memset(path, 0, 512);
@ -1978,8 +1941,7 @@ static int __core_scsi3_write_aptpl_to_file(
return PTR_ERR(file);
}
if (!pr_aptpl_buf_len)
pr_aptpl_buf_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */
pr_aptpl_buf_len = (strlen(buf) + 1); /* Add extra for NULL */
ret = kernel_write(file, buf, pr_aptpl_buf_len, 0);
@ -1990,60 +1952,64 @@ static int __core_scsi3_write_aptpl_to_file(
return ret ? -EIO : 0;
}
static int
core_scsi3_update_and_write_aptpl(struct se_device *dev, unsigned char *in_buf,
u32 in_pr_aptpl_buf_len)
/*
* Clear the APTPL metadata if APTPL has been disabled, otherwise
* write out the updated metadata to struct file for this SCSI device.
*/
static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, bool aptpl)
{
unsigned char null_buf[64], *buf;
u32 pr_aptpl_buf_len;
int clear_aptpl_metadata = 0;
int ret;
unsigned char *buf;
int rc;
/*
* Can be called with a NULL pointer from PROUT service action CLEAR
*/
if (!in_buf) {
memset(null_buf, 0, 64);
buf = &null_buf[0];
/*
* This will clear the APTPL metadata to:
* "No Registrations or Reservations" status
*/
pr_aptpl_buf_len = 64;
clear_aptpl_metadata = 1;
} else {
buf = in_buf;
pr_aptpl_buf_len = in_pr_aptpl_buf_len;
if (!aptpl) {
char *null_buf = "No Registrations or Reservations\n";
rc = __core_scsi3_write_aptpl_to_file(dev, null_buf);
dev->t10_pr.pr_aptpl_active = 0;
pr_debug("SPC-3 PR: Set APTPL Bit Deactivated\n");
if (rc)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return 0;
}
ret = core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
clear_aptpl_metadata);
if (ret != 0)
return ret;
buf = kzalloc(PR_APTPL_BUF_LEN, GFP_KERNEL);
if (!buf)
return TCM_OUT_OF_RESOURCES;
/*
* __core_scsi3_write_aptpl_to_file() will call strlen()
* on the passed buf to determine pr_aptpl_buf_len.
*/
return __core_scsi3_write_aptpl_to_file(dev, buf, 0);
rc = core_scsi3_update_aptpl_buf(dev, buf, PR_APTPL_BUF_LEN);
if (rc < 0) {
kfree(buf);
return TCM_OUT_OF_RESOURCES;
}
rc = __core_scsi3_write_aptpl_to_file(dev, buf);
if (rc != 0) {
pr_err("SPC-3 PR: Could not update APTPL\n");
kfree(buf);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
dev->t10_pr.pr_aptpl_active = 1;
kfree(buf);
pr_debug("SPC-3 PR: Set APTPL Bit Activated\n");
return 0;
}
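With this rewrite, callers no longer juggle a per-registration pr_aptpl_buf or flip pr_aptpl_active by hand; a minimal sketch of the two call-site patterns that appear in the hunks below, using the variable names from the surrounding code:

	/* RESERVE/RELEASE/PREEMPT style: refresh the metadata only if
	 * APTPL is already active for this device. */
	if (pr_tmpl->pr_aptpl_active)
		core_scsi3_update_and_write_aptpl(cmd->se_dev, true);

	/* REGISTER style: the APTPL bit from the parameter list decides
	 * whether the metadata is rewritten or cleared. */
	ret = core_scsi3_update_and_write_aptpl(dev, aptpl);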
static sense_reason_t
core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
int aptpl, int all_tg_pt, int spec_i_pt, int ignore_key)
bool aptpl, bool all_tg_pt, bool spec_i_pt, enum register_type register_type)
{
struct se_session *se_sess = cmd->se_sess;
struct se_device *dev = cmd->se_dev;
struct se_dev_entry *se_deve;
struct se_lun *se_lun = cmd->se_lun;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e;
struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
/* Used for APTPL metadata w/ UNREGISTER */
unsigned char *pr_aptpl_buf = NULL;
unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
sense_reason_t ret = TCM_NO_SENSE;
int pr_holder = 0, type;
int pr_holder = 0;
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
@ -2061,8 +2027,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
/*
* Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47
*/
pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
if (!pr_reg_e) {
pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
if (!pr_reg) {
if (res_key) {
pr_warn("SPC-3 PR: Reservation Key non-zero"
" for SA REGISTER, returning CONFLICT\n");
@ -2083,7 +2049,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
if (core_scsi3_alloc_registration(cmd->se_dev,
se_sess->se_node_acl, se_deve, isid_ptr,
sa_res_key, all_tg_pt, aptpl,
ignore_key, 0)) {
register_type, 0)) {
pr_err("Unable to allocate"
" struct t10_pr_registration\n");
return TCM_INVALID_PARAMETER_LIST;
@ -2102,97 +2068,68 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
if (ret != 0)
return ret;
}
/*
* Nothing left to do for the APTPL=0 case.
*/
if (!aptpl) {
pr_tmpl->pr_aptpl_active = 0;
core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for"
" REGISTER\n");
return 0;
}
/*
* Locate the newly allocated local I_T Nexus *pr_reg, and
* update the APTPL metadata information using its
* preallocated *pr_reg->pr_aptpl_buf.
*/
pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev,
se_sess->se_node_acl, se_sess);
if (core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg->pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len)) {
pr_tmpl->pr_aptpl_active = 1;
pr_debug("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n");
}
goto out_put_pr_reg;
return core_scsi3_update_and_write_aptpl(dev, aptpl);
}
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
*/
pr_reg = pr_reg_e;
type = pr_reg->pr_res_type;
/* ok, existing registration */
if (!ignore_key) {
if (res_key != pr_reg->pr_res_key) {
pr_err("SPC-3 PR REGISTER: Received"
" res_key: 0x%016Lx does not match"
" existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key,
pr_reg->pr_res_key);
ret = TCM_RESERVATION_CONFLICT;
goto out_put_pr_reg;
}
if ((register_type == REGISTER) && (res_key != pr_reg->pr_res_key)) {
pr_err("SPC-3 PR REGISTER: Received"
" res_key: 0x%016Lx does not match"
" existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key,
pr_reg->pr_res_key);
ret = TCM_RESERVATION_CONFLICT;
goto out;
}
if (spec_i_pt) {
pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
" set while sa_res_key=0\n");
pr_err("SPC-3 PR REGISTER: SPEC_I_PT"
" set on a registered nexus\n");
ret = TCM_INVALID_PARAMETER_LIST;
goto out_put_pr_reg;
goto out;
}
/*
* An existing ALL_TG_PT=1 registration being released
* must also set ALL_TG_PT=1 in the incoming PROUT.
*/
if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) {
pr_err("SPC-3 PR UNREGISTER: ALL_TG_PT=1"
if (pr_reg->pr_reg_all_tg_pt && !all_tg_pt) {
pr_err("SPC-3 PR REGISTER: ALL_TG_PT=1"
" registration exists, but ALL_TG_PT=1 bit not"
" present in received PROUT\n");
ret = TCM_INVALID_CDB_FIELD;
goto out_put_pr_reg;
goto out;
}
/*
* Allocate APTPL metadata buffer used for UNREGISTER ops
* sa_res_key=1 Change Reservation Key for registered I_T Nexus.
*/
if (aptpl) {
pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len,
GFP_KERNEL);
if (!pr_aptpl_buf) {
pr_err("Unable to allocate"
" pr_aptpl_buf\n");
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out_put_pr_reg;
}
}
if (sa_res_key) {
/*
* Increment PRgeneration counter for struct se_device
* upon a successful REGISTER, see spc4r17 section 6.3.2
* READ_KEYS service action.
*/
pr_reg->pr_res_generation = core_scsi3_pr_generation(cmd->se_dev);
pr_reg->pr_res_key = sa_res_key;
pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
" Key for %s to: 0x%016Lx PRgeneration:"
" 0x%08x\n", cmd->se_tfo->get_fabric_name(),
(register_type == REGISTER_AND_IGNORE_EXISTING_KEY) ? "_AND_IGNORE_EXISTING_KEY" : "",
pr_reg->pr_reg_nacl->initiatorname,
pr_reg->pr_res_key, pr_reg->pr_res_generation);
/*
* sa_res_key=0 Unregister Reservation Key for registered I_T
* Nexus sa_res_key=1 Change Reservation Key for registered I_T
* Nexus.
*/
if (!sa_res_key) {
} else {
/*
* sa_res_key=0 Unregister Reservation Key for registered I_T Nexus.
*/
pr_holder = core_scsi3_check_implict_release(
cmd->se_dev, pr_reg);
if (pr_holder < 0) {
kfree(pr_aptpl_buf);
ret = TCM_RESERVATION_CONFLICT;
goto out_put_pr_reg;
goto out;
}
spin_lock(&pr_tmpl->registration_lock);
@ -2237,8 +2174,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
* RESERVATIONS RELEASED.
*/
if (pr_holder &&
(type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY ||
type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {
(pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY ||
pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {
list_for_each_entry(pr_reg_p,
&pr_tmpl->registration_list,
pr_reg_list) {
@ -2250,60 +2187,13 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
ASCQ_2AH_RESERVATIONS_RELEASED);
}
}
spin_unlock(&pr_tmpl->registration_lock);
if (!aptpl) {
pr_tmpl->pr_aptpl_active = 0;
core_scsi3_update_and_write_aptpl(dev, NULL, 0);
pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
" for UNREGISTER\n");
return 0;
}
if (!core_scsi3_update_and_write_aptpl(dev, &pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len)) {
pr_tmpl->pr_aptpl_active = 1;
pr_debug("SPC-3 PR: Set APTPL Bit Activated"
" for UNREGISTER\n");
}
goto out_free_aptpl_buf;
}
/*
* Increment PRgeneration counter for struct se_device
* upon a successful REGISTER, see spc4r17 section 6.3.2
* READ_KEYS service action.
*/
pr_reg->pr_res_generation = core_scsi3_pr_generation(cmd->se_dev);
pr_reg->pr_res_key = sa_res_key;
pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
" Key for %s to: 0x%016Lx PRgeneration:"
" 0x%08x\n", cmd->se_tfo->get_fabric_name(),
(ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "",
pr_reg->pr_reg_nacl->initiatorname,
pr_reg->pr_res_key, pr_reg->pr_res_generation);
ret = core_scsi3_update_and_write_aptpl(dev, aptpl);
if (!aptpl) {
pr_tmpl->pr_aptpl_active = 0;
core_scsi3_update_and_write_aptpl(dev, NULL, 0);
pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
" for REGISTER\n");
ret = 0;
goto out_put_pr_reg;
}
if (!core_scsi3_update_and_write_aptpl(dev, &pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len)) {
pr_tmpl->pr_aptpl_active = 1;
pr_debug("SPC-3 PR: Set APTPL Bit Activated"
" for REGISTER\n");
}
out_free_aptpl_buf:
kfree(pr_aptpl_buf);
ret = 0;
out_put_pr_reg:
out:
core_scsi3_put_pr_reg(pr_reg);
return ret;
}
@ -2340,7 +2230,6 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
struct t10_reservation *pr_tmpl = &dev->t10_pr;
char i_buf[PR_REG_ISID_ID_LEN];
sense_reason_t ret;
int prf_isid;
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
@ -2466,8 +2355,7 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
pr_reg->pr_res_type = type;
pr_reg->pr_res_holder = 1;
dev->dev_pr_res_holder = pr_reg;
prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
pr_debug("SPC-3 PR [%s] Service Action: RESERVE created new"
" reservation holder TYPE: %s ALL_TG_PT: %d\n",
@ -2476,17 +2364,11 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
cmd->se_tfo->get_fabric_name(),
se_sess->se_node_acl->initiatorname,
(prf_isid) ? &i_buf[0] : "");
i_buf);
spin_unlock(&dev->dev_reservation_lock);
if (pr_tmpl->pr_aptpl_active) {
if (!core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg->pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len)) {
pr_debug("SPC-3 PR: Updated APTPL metadata"
" for RESERVE\n");
}
}
if (pr_tmpl->pr_aptpl_active)
core_scsi3_update_and_write_aptpl(cmd->se_dev, true);
ret = 0;
out_put_pr_reg:
@ -2524,11 +2406,9 @@ static void __core_scsi3_complete_pro_release(
{
struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
char i_buf[PR_REG_ISID_ID_LEN];
int prf_isid;
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
/*
* Go ahead and release the current PR reservation holder.
*/
@ -2541,7 +2421,7 @@ static void __core_scsi3_complete_pro_release(
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
tfo->get_fabric_name(), se_nacl->initiatorname,
(prf_isid) ? &i_buf[0] : "");
i_buf);
/*
* Clear TYPE and SCOPE for the next PROUT Service Action: RESERVE
*/
@ -2702,12 +2582,9 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
spin_unlock(&pr_tmpl->registration_lock);
write_aptpl:
if (pr_tmpl->pr_aptpl_active) {
if (!core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len)) {
pr_debug("SPC-3 PR: Updated APTPL metadata for RELEASE\n");
}
}
if (pr_tmpl->pr_aptpl_active)
core_scsi3_update_and_write_aptpl(cmd->se_dev, true);
out_put_pr_reg:
core_scsi3_put_pr_reg(pr_reg);
return ret;
@ -2791,11 +2668,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
pr_debug("SPC-3 PR [%s] Service Action: CLEAR complete\n",
cmd->se_tfo->get_fabric_name());
if (pr_tmpl->pr_aptpl_active) {
core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
pr_debug("SPC-3 PR: Updated APTPL metadata"
" for CLEAR\n");
}
core_scsi3_update_and_write_aptpl(cmd->se_dev, false);
core_scsi3_pr_generation(dev);
return 0;
@ -2810,16 +2683,14 @@ static void __core_scsi3_complete_pro_preempt(
struct list_head *preempt_and_abort_list,
int type,
int scope,
int abort)
enum preempt_type preempt_type)
{
struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
char i_buf[PR_REG_ISID_ID_LEN];
int prf_isid;
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
/*
* Do an implicit RELEASE of the existing reservation.
*/
@ -2834,12 +2705,12 @@ static void __core_scsi3_complete_pro_preempt(
pr_debug("SPC-3 PR [%s] Service Action: PREEMPT%s created new"
" reservation holder TYPE: %s ALL_TG_PT: %d\n",
tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
tfo->get_fabric_name(), (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "",
core_scsi3_pr_dump_type(type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
pr_debug("SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
tfo->get_fabric_name(), (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "",
nacl->initiatorname, i_buf);
/*
* For PREEMPT_AND_ABORT, add the preempting reservation's
* struct t10_pr_registration to the list that will be compared
@ -2869,14 +2740,13 @@ static void core_scsi3_release_preempt_and_abort(
pr_reg->pr_reg_deve = NULL;
pr_reg->pr_reg_nacl = NULL;
kfree(pr_reg->pr_aptpl_buf);
kmem_cache_free(t10_pr_reg_cache, pr_reg);
}
}
static sense_reason_t
core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
u64 sa_res_key, int abort)
u64 sa_res_key, enum preempt_type preempt_type)
{
struct se_device *dev = cmd->se_dev;
struct se_node_acl *pr_reg_nacl;
@ -2896,7 +2766,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
if (!pr_reg_n) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for PREEMPT%s\n",
(abort) ? "_AND_ABORT" : "");
(preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "");
return TCM_RESERVATION_CONFLICT;
}
if (pr_reg_n->pr_res_key != res_key) {
@ -2965,7 +2835,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
pr_reg_nacl = pr_reg->pr_reg_nacl;
pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
__core_scsi3_free_registration(dev, pr_reg,
(abort) ? &preempt_and_abort_list :
(preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list :
NULL, calling_it_nexus);
released_regs++;
} else {
@ -2993,7 +2863,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
pr_reg_nacl = pr_reg->pr_reg_nacl;
pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
__core_scsi3_free_registration(dev, pr_reg,
(abort) ? &preempt_and_abort_list :
(preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list :
NULL, 0);
released_regs++;
}
@ -3022,24 +2892,17 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
*/
if (pr_res_holder && all_reg && !(sa_res_key)) {
__core_scsi3_complete_pro_preempt(dev, pr_reg_n,
(abort) ? &preempt_and_abort_list : NULL,
type, scope, abort);
(preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL,
type, scope, preempt_type);
if (abort)
if (preempt_type == PREEMPT_AND_ABORT)
core_scsi3_release_preempt_and_abort(
&preempt_and_abort_list, pr_reg_n);
}
spin_unlock(&dev->dev_reservation_lock);
if (pr_tmpl->pr_aptpl_active) {
if (!core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg_n->pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len)) {
pr_debug("SPC-3 PR: Updated APTPL"
" metadata for PREEMPT%s\n", (abort) ?
"_AND_ABORT" : "");
}
}
if (pr_tmpl->pr_aptpl_active)
core_scsi3_update_and_write_aptpl(cmd->se_dev, true);
core_scsi3_put_pr_reg(pr_reg_n);
core_scsi3_pr_generation(cmd->se_dev);
@ -3103,7 +2966,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
pr_reg_nacl = pr_reg->pr_reg_nacl;
pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
__core_scsi3_free_registration(dev, pr_reg,
(abort) ? &preempt_and_abort_list : NULL,
(preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL,
calling_it_nexus);
/*
* e) Establish a unit attention condition for the initiator
@ -3120,8 +2983,8 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
* I_T nexus using the contents of the SCOPE and TYPE fields;
*/
__core_scsi3_complete_pro_preempt(dev, pr_reg_n,
(abort) ? &preempt_and_abort_list : NULL,
type, scope, abort);
(preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL,
type, scope, preempt_type);
/*
* d) Process tasks as defined in 5.7.1;
* e) See above..
@ -3161,20 +3024,14 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
* been removed from the primary pr_reg list), except the
* new persistent reservation holder, the calling Initiator Port.
*/
if (abort) {
if (preempt_type == PREEMPT_AND_ABORT) {
core_tmr_lun_reset(dev, NULL, &preempt_and_abort_list, cmd);
core_scsi3_release_preempt_and_abort(&preempt_and_abort_list,
pr_reg_n);
}
if (pr_tmpl->pr_aptpl_active) {
if (!core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg_n->pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len)) {
pr_debug("SPC-3 PR: Updated APTPL metadata for PREEMPT"
"%s\n", abort ? "_AND_ABORT" : "");
}
}
if (pr_tmpl->pr_aptpl_active)
core_scsi3_update_and_write_aptpl(cmd->se_dev, true);
core_scsi3_put_pr_reg(pr_reg_n);
core_scsi3_pr_generation(cmd->se_dev);
@ -3183,7 +3040,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
static sense_reason_t
core_scsi3_emulate_pro_preempt(struct se_cmd *cmd, int type, int scope,
u64 res_key, u64 sa_res_key, int abort)
u64 res_key, u64 sa_res_key, enum preempt_type preempt_type)
{
switch (type) {
case PR_TYPE_WRITE_EXCLUSIVE:
@ -3193,10 +3050,10 @@ core_scsi3_emulate_pro_preempt(struct se_cmd *cmd, int type, int scope,
case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
return core_scsi3_pro_preempt(cmd, type, scope, res_key,
sa_res_key, abort);
sa_res_key, preempt_type);
default:
pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
" Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
" Type: 0x%02x\n", (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "", type);
return TCM_INVALID_CDB_FIELD;
}
}
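Likewise, the preempt_type values used throughout the PREEMPT paths above replace the old int abort flag; a sketch of the enum assumed here (defined in the PR header, not shown in this diff):

enum preempt_type {
	PREEMPT,
	PREEMPT_AND_ABORT,
};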
@ -3220,7 +3077,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
unsigned char *initiator_str;
char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
u32 tid_len, tmp_tid_len;
int new_reg = 0, type, scope, matching_iname, prf_isid;
int new_reg = 0, type, scope, matching_iname;
sense_reason_t ret;
unsigned short rtpi;
unsigned char proto_ident;
@ -3564,8 +3421,7 @@ after_iport_check:
dest_pr_reg->pr_res_holder = 1;
dest_pr_reg->pr_res_type = type;
pr_reg->pr_res_scope = scope;
prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
/*
* Increment PRGeneration for existing registrations..
*/
@ -3581,7 +3437,7 @@ after_iport_check:
pr_debug("SPC-3 PR Successfully moved reservation from"
" %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n",
tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname,
(prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(),
i_buf, dest_tf_ops->get_fabric_name(),
dest_node_acl->initiatorname, (iport_ptr != NULL) ?
iport_ptr : "");
/*
@ -3602,24 +3458,7 @@ after_iport_check:
} else
core_scsi3_put_pr_reg(pr_reg);
/*
* Clear the APTPL metadata if APTPL has been disabled, otherwise
* write out the updated metadata to struct file for this SCSI device.
*/
if (!aptpl) {
pr_tmpl->pr_aptpl_active = 0;
core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for"
" REGISTER_AND_MOVE\n");
} else {
pr_tmpl->pr_aptpl_active = 1;
if (!core_scsi3_update_and_write_aptpl(cmd->se_dev,
&dest_pr_reg->pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len)) {
pr_debug("SPC-3 PR: Set APTPL Bit Activated for"
" REGISTER_AND_MOVE\n");
}
}
core_scsi3_update_and_write_aptpl(cmd->se_dev, aptpl);
transport_kunmap_data_sg(cmd);
@ -3752,7 +3591,7 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
switch (sa) {
case PRO_REGISTER:
ret = core_scsi3_emulate_pro_register(cmd,
res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 0);
res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, REGISTER);
break;
case PRO_RESERVE:
ret = core_scsi3_emulate_pro_reserve(cmd, type, scope, res_key);
@ -3765,15 +3604,15 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
break;
case PRO_PREEMPT:
ret = core_scsi3_emulate_pro_preempt(cmd, type, scope,
res_key, sa_res_key, 0);
res_key, sa_res_key, PREEMPT);
break;
case PRO_PREEMPT_AND_ABORT:
ret = core_scsi3_emulate_pro_preempt(cmd, type, scope,
res_key, sa_res_key, 1);
res_key, sa_res_key, PREEMPT_AND_ABORT);
break;
case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
ret = core_scsi3_emulate_pro_register(cmd,
0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 1);
0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, REGISTER_AND_IGNORE_EXISTING_KEY);
break;
case PRO_REGISTER_AND_MOVE:
ret = core_scsi3_emulate_pro_register_and_move(cmd, res_key,
@ -45,7 +45,7 @@
extern struct kmem_cache *t10_pr_reg_cache;
extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
extern void core_pr_dump_initiator_port(struct t10_pr_registration *,
char *, u32);
extern sense_reason_t target_scsi2_reservation_release(struct se_cmd *);
extern sense_reason_t target_scsi2_reservation_reserve(struct se_cmd *);
@ -139,6 +139,11 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
rd_dev->rd_page_count);
return -EINVAL;
}
/* Don't need backing pages for NULLIO */
if (rd_dev->rd_flags & RDF_NULLIO)
return 0;
total_sg_needed = rd_dev->rd_page_count;
sg_tables = (total_sg_needed / max_sg_per_table) + 1;
@ -38,11 +38,27 @@ static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
unsigned char *cdb = cmd->t_task_cdb;
unsigned long long blocks_long = dev->transport->get_blocks(dev);
unsigned char *rbuf;
unsigned char buf[8];
u32 blocks;
/*
* SBC-2 says:
* If the PMI bit is set to zero and the LOGICAL BLOCK
* ADDRESS field is not set to zero, the device server shall
* terminate the command with CHECK CONDITION status with
* the sense key set to ILLEGAL REQUEST and the additional
* sense code set to INVALID FIELD IN CDB.
*
* In SBC-3, these fields are obsolete, but some SCSI
* compliance tests actually check this, so we might as well
* follow SBC-2.
*/
if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
return TCM_INVALID_CDB_FIELD;
if (blocks_long >= 0x00000000ffffffff)
blocks = 0xffffffff;
else
@ -581,7 +597,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
pr_err("cmd exceeds last lba %llu "
"(lba %llu, sectors %u)\n",
end_lba, cmd->t_task_lba, sectors);
return TCM_INVALID_CDB_FIELD;
return TCM_ADDRESS_OUT_OF_RANGE;
}
size = sbc_get_size(cmd, sectors);
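For reference on the READ CAPACITY(10) PMI/LBA check earlier in this file: the LOGICAL BLOCK ADDRESS occupies CDB bytes 2-5 and PMI is bit 0 of byte 8. A hypothetical CDB that the check now rejects (illustration only, not part of the patch):

static const unsigned char bad_read_capacity_cdb[10] = {
	[0] = 0x25,			/* READ CAPACITY(10) opcode */
	[2] = 0x00, [3] = 0x00,
	[4] = 0x10, [5] = 0x00,		/* LBA = 0x1000, non-zero */
	[8] = 0x00,			/* bit 0 = PMI, left clear */
};
/* PMI=0 with a non-zero LBA now terminates the command with
 * TCM_INVALID_CDB_FIELD, i.e. ILLEGAL REQUEST / INVALID FIELD IN CDB. */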
@ -85,13 +85,8 @@ void core_tmr_release_req(
static void core_tmr_handle_tas_abort(
struct se_node_acl *tmr_nacl,
struct se_cmd *cmd,
int tas,
int fe_count)
int tas)
{
if (!fe_count) {
transport_cmd_finish_abort(cmd, 1);
return;
}
/*
* TASK ABORTED status (TAS) bit support
*/
@ -253,7 +248,6 @@ static void core_tmr_drain_state_list(
LIST_HEAD(drain_task_list);
struct se_cmd *cmd, *next;
unsigned long flags;
int fe_count;
/*
* Complete outstanding commands with TASK_ABORTED SAM status.
@ -329,12 +323,10 @@ static void core_tmr_drain_state_list(
spin_lock_irqsave(&cmd->t_state_lock, flags);
target_stop_cmd(cmd, &flags);
fe_count = atomic_read(&cmd->t_fe_count);
cmd->transport_state |= CMD_T_ABORTED;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas);
}
}
@ -52,6 +52,9 @@
#include "target_core_pr.h"
#include "target_core_ua.h"
#define CREATE_TRACE_POINTS
#include <trace/events/target.h>
static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
@ -446,11 +449,15 @@ static void target_remove_from_state_list(struct se_cmd *cmd)
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists)
static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
bool write_pending)
{
unsigned long flags;
spin_lock_irqsave(&cmd->t_state_lock, flags);
if (write_pending)
cmd->t_state = TRANSPORT_WRITE_PENDING;
/*
* Determine if IOCTL context caller is requesting the stopping of this
* command for LUN shutdown purposes.
@ -515,7 +522,7 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists)
static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
return transport_cmd_check_stop(cmd, true);
return transport_cmd_check_stop(cmd, true, false);
}
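The new write_pending argument lets the WRITE submission path fold the TRANSPORT_WRITE_PENDING transition into the same ->t_state_lock round-trip the check-stop logic already takes; the transport_generic_new_cmd() hunk further below reduces to roughly:

	/* One lock acquisition instead of lock/set-state/unlock followed
	 * by a separate transport_cmd_check_stop(cmd, false). */
	transport_cmd_check_stop(cmd, false, true);

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;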
static void transport_lun_remove_cmd(struct se_cmd *cmd)
@ -526,13 +533,6 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
if (!lun)
return;
spin_lock_irqsave(&cmd->t_state_lock, flags);
if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
target_remove_from_state_list(cmd);
}
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
spin_lock_irqsave(&lun->lun_cmd_lock, flags);
if (!list_empty(&cmd->se_lun_node))
list_del_init(&cmd->se_lun_node);
@ -1092,7 +1092,6 @@ sense_reason_t
target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
{
struct se_device *dev = cmd->se_dev;
unsigned long flags;
sense_reason_t ret;
/*
@ -1127,6 +1126,8 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
*/
memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
trace_target_sequencer_start(cmd);
/*
* Check for an existing UNIT ATTENTION condition
*/
@ -1152,9 +1153,7 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
if (ret)
return ret;
spin_lock_irqsave(&cmd->t_state_lock, flags);
cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
spin_lock(&cmd->se_lun->lun_sep_lock);
if (cmd->se_lun->lun_sep)
@ -1552,7 +1551,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
cmd->orig_fe_lun, 0x2C,
ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
ret = cmd->se_tfo->queue_status(cmd);
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_status(cmd);
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
goto check_stop;
@ -1583,10 +1583,6 @@ static void __target_execute_cmd(struct se_cmd *cmd)
{
sense_reason_t ret;
spin_lock_irq(&cmd->t_state_lock);
cmd->transport_state |= (CMD_T_BUSY|CMD_T_SENT);
spin_unlock_irq(&cmd->t_state_lock);
if (cmd->execute_cmd) {
ret = cmd->execute_cmd(cmd);
if (ret) {
@ -1693,11 +1689,17 @@ void target_execute_cmd(struct se_cmd *cmd)
}
cmd->t_state = TRANSPORT_PROCESSING;
cmd->transport_state |= CMD_T_ACTIVE;
cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
if (!target_handle_task_attr(cmd))
__target_execute_cmd(cmd);
if (target_handle_task_attr(cmd)) {
spin_lock_irq(&cmd->t_state_lock);
cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
spin_unlock_irq(&cmd->t_state_lock);
return;
}
__target_execute_cmd(cmd);
}
EXPORT_SYMBOL(target_execute_cmd);
@ -1770,6 +1772,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
transport_complete_task_attr(cmd);
if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_status(cmd);
if (ret)
goto out;
@ -1777,6 +1780,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_data_in(cmd);
break;
case DMA_TO_DEVICE:
@ -1787,6 +1791,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
}
/* Fall through for DMA_TO_DEVICE */
case DMA_NONE:
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_status(cmd);
break;
default:
@ -1865,6 +1870,7 @@ static void target_complete_ok_work(struct work_struct *work)
}
spin_unlock(&cmd->se_lun->lun_sep_lock);
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_data_in(cmd);
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
@ -1893,6 +1899,7 @@ static void target_complete_ok_work(struct work_struct *work)
}
/* Fall through for DMA_TO_DEVICE */
case DMA_NONE:
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_status(cmd);
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
@ -1956,11 +1963,7 @@ static int transport_release_cmd(struct se_cmd *cmd)
* If this cmd has been setup with target_get_sess_cmd(), drop
* the kref and call ->release_cmd() in kref callback.
*/
if (cmd->check_release != 0)
return target_put_sess_cmd(cmd->se_sess, cmd);
cmd->se_tfo->release_cmd(cmd);
return 1;
return target_put_sess_cmd(cmd->se_sess, cmd);
}
/**
@ -1971,21 +1974,6 @@ static int transport_release_cmd(struct se_cmd *cmd)
*/
static int transport_put_cmd(struct se_cmd *cmd)
{
unsigned long flags;
spin_lock_irqsave(&cmd->t_state_lock, flags);
if (atomic_read(&cmd->t_fe_count) &&
!atomic_dec_and_test(&cmd->t_fe_count)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return 0;
}
if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
target_remove_from_state_list(cmd);
}
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_free_pages(cmd);
return transport_release_cmd(cmd);
}
@ -2103,9 +2091,6 @@ transport_generic_new_cmd(struct se_cmd *cmd)
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
atomic_inc(&cmd->t_fe_count);
/*
* If this command is not a write we can execute it right here,
* for write buffers we need to notify the fabric driver first
@ -2116,12 +2101,7 @@ transport_generic_new_cmd(struct se_cmd *cmd)
target_execute_cmd(cmd);
return 0;
}
spin_lock_irq(&cmd->t_state_lock);
cmd->t_state = TRANSPORT_WRITE_PENDING;
spin_unlock_irq(&cmd->t_state_lock);
transport_cmd_check_stop(cmd, false);
transport_cmd_check_stop(cmd, false, true);
ret = cmd->se_tfo->write_pending(cmd);
if (ret == -EAGAIN || ret == -ENOMEM)
@ -2202,8 +2182,6 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
goto out;
}
list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
se_cmd->check_release = 1;
out:
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
return ret;
@ -2319,7 +2297,7 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
cmd->se_tfo->get_task_tag(cmd));
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_cmd_check_stop(cmd, false);
transport_cmd_check_stop(cmd, false, false);
return -EPERM;
}
cmd->transport_state |= CMD_T_LUN_FE_STOP;
@ -2427,7 +2405,7 @@ check_cond:
spin_unlock_irqrestore(&cmd->t_state_lock,
cmd_flags);
transport_cmd_check_stop(cmd, false);
transport_cmd_check_stop(cmd, false, false);
complete(&cmd->transport_lun_fe_stop_comp);
spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
continue;
@ -2778,6 +2756,7 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
after_reason:
trace_target_cmd_complete(cmd);
return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
@ -2794,6 +2773,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
trace_target_cmd_complete(cmd);
cmd->se_tfo->queue_status(cmd);
return 1;
@ -2831,6 +2811,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
" ITT: 0x%08x\n", cmd->t_task_cdb[0],
cmd->se_tfo->get_task_tag(cmd));
trace_target_cmd_complete(cmd);
cmd->se_tfo->queue_status(cmd);
}
@ -161,7 +161,7 @@ int ft_write_pending(struct se_cmd *);
int ft_write_pending_status(struct se_cmd *);
u32 ft_get_task_tag(struct se_cmd *);
int ft_get_cmd_state(struct se_cmd *);
int ft_queue_tm_resp(struct se_cmd *);
void ft_queue_tm_resp(struct se_cmd *);
/*
* other internal functions.
@ -394,14 +394,14 @@ static void ft_send_tm(struct ft_cmd *cmd)
/*
* Send status from completed task management request.
*/
int ft_queue_tm_resp(struct se_cmd *se_cmd)
void ft_queue_tm_resp(struct se_cmd *se_cmd)
{
struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
struct se_tmr_req *tmr = se_cmd->se_tmr_req;
enum fcp_resp_rsp_codes code;
if (cmd->aborted)
return 0;
return;
switch (tmr->response) {
case TMR_FUNCTION_COMPLETE:
code = FCP_TMF_CMPL;
@ -413,10 +413,7 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd)
code = FCP_TMF_REJECTED;
break;
case TMR_TASK_DOES_NOT_EXIST:
case TMR_TASK_STILL_ALLEGIANT:
case TMR_TASK_FAILOVER_NOT_SUPPORTED:
case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
case TMR_FUNCTION_AUTHORIZATION_FAILED:
default:
code = FCP_TMF_FAILED;
break;
@ -424,7 +421,6 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd)
pr_debug("tmr fn %d resp %d fcp code %d\n",
tmr->function, tmr->response, code);
ft_send_resp_code(cmd, code);
return 0;
}
static void ft_send_work(struct work_struct *work);
@ -1467,9 +1467,8 @@ static int usbg_get_cmd_state(struct se_cmd *se_cmd)
return 0;
}
static int usbg_queue_tm_rsp(struct se_cmd *se_cmd)
static void usbg_queue_tm_rsp(struct se_cmd *se_cmd)
{
return 0;
}
static const char *usbg_check_wwn(const char *name)
@ -448,7 +448,19 @@ static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
{
return;
struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
struct tcm_vhost_cmd, tvc_se_cmd);
if (tv_cmd->tvc_sgl_count) {
u32 i;
for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
put_page(sg_page(&tv_cmd->tvc_sgl[i]));
kfree(tv_cmd->tvc_sgl);
}
tcm_vhost_put_inflight(tv_cmd->inflight);
kfree(tv_cmd);
}
static int tcm_vhost_shutdown_session(struct se_session *se_sess)
@ -518,9 +530,9 @@ static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
return 0;
}
static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
{
return 0;
return;
}
static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
@ -560,19 +572,13 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
/* TODO locking against target/backend threads? */
transport_generic_free_cmd(se_cmd, 1);
transport_generic_free_cmd(se_cmd, 0);
if (cmd->tvc_sgl_count) {
u32 i;
for (i = 0; i < cmd->tvc_sgl_count; i++)
put_page(sg_page(&cmd->tvc_sgl[i]));
}
kfree(cmd->tvc_sgl);
}
tcm_vhost_put_inflight(cmd->inflight);
kfree(cmd);
static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}
static void
@ -856,7 +862,7 @@ static void tcm_vhost_submission_work(struct work_struct *work)
cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
cmd->tvc_lun, cmd->tvc_exp_data_len,
cmd->tvc_task_attr, cmd->tvc_data_direction,
0, sg_ptr, cmd->tvc_sgl_count,
TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
sg_bidi_ptr, sg_no_bidi);
if (rc < 0) {
transport_send_check_condition_and_sense(se_cmd,
@ -2028,6 +2034,7 @@ static struct target_core_fabric_ops tcm_vhost_ops = {
.tpg_release_fabric_acl = tcm_vhost_release_fabric_acl,
.tpg_get_inst_index = tcm_vhost_tpg_get_inst_index,
.release_cmd = tcm_vhost_release_cmd,
.check_stop_free = vhost_scsi_check_stop_free,
.shutdown_session = tcm_vhost_shutdown_session,
.close_session = tcm_vhost_close_session,
.sess_get_index = tcm_vhost_sess_get_index,
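With TARGET_SCF_ACK_KREF passed at submission and check_stop_free wired up above, the vhost-scsi command lifetime now runs over se_cmd->cmd_kref; a rough sketch of the flow using only the callbacks shown in this hunk (the order of the two reference drops can vary):

	/*
	 * 1) tcm_vhost_submission_work(): target_submit_cmd(...,
	 *    TARGET_SCF_ACK_KREF, ...) leaves an extra cmd_kref reference
	 *    held on behalf of the fabric.
	 * 2) vhost_scsi_free_cmd(): transport_generic_free_cmd(se_cmd, 0)
	 *    drops the submission-side reference.
	 * 3) Target core completion calls ->check_stop_free(), i.e.
	 *    vhost_scsi_check_stop_free() -> target_put_sess_cmd().
	 * 4) The final kref_put() invokes ->release_cmd(), i.e.
	 *    tcm_vhost_release_cmd(), which puts the data pages and frees
	 *    tvc_sgl and the command.
	 */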
@ -34,8 +34,6 @@ extern void iscsit_put_transport(struct iscsit_transport *);
/*
* From iscsi_target.c
*/
extern int iscsit_add_reject_from_cmd(u8, int, int, unsigned char *,
struct iscsi_cmd *);
extern int iscsit_setup_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *,
unsigned char *);
extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *);
@ -45,18 +43,26 @@ extern int iscsit_check_dataout_hdr(struct iscsi_conn *, unsigned char *,
struct iscsi_cmd **);
extern int iscsit_check_dataout_payload(struct iscsi_cmd *, struct iscsi_data *,
bool);
extern int iscsit_handle_nop_out(struct iscsi_conn *, struct iscsi_cmd *,
unsigned char *);
extern int iscsit_setup_nop_out(struct iscsi_conn *, struct iscsi_cmd *,
struct iscsi_nopout *);
extern int iscsit_process_nop_out(struct iscsi_conn *, struct iscsi_cmd *,
struct iscsi_nopout *);
extern int iscsit_handle_logout_cmd(struct iscsi_conn *, struct iscsi_cmd *,
unsigned char *);
extern int iscsit_handle_task_mgt_cmd(struct iscsi_conn *, struct iscsi_cmd *,
unsigned char *);
extern int iscsit_setup_text_cmd(struct iscsi_conn *, struct iscsi_cmd *,
struct iscsi_text *);
extern int iscsit_process_text_cmd(struct iscsi_conn *, struct iscsi_cmd *,
struct iscsi_text *);
extern void iscsit_build_rsp_pdu(struct iscsi_cmd *, struct iscsi_conn *,
bool, struct iscsi_scsi_rsp *);
extern void iscsit_build_nopin_rsp(struct iscsi_cmd *, struct iscsi_conn *,
struct iscsi_nopin *, bool);
extern void iscsit_build_task_mgt_rsp(struct iscsi_cmd *, struct iscsi_conn *,
struct iscsi_tm_rsp *);
extern int iscsit_build_text_rsp(struct iscsi_cmd *, struct iscsi_conn *,
struct iscsi_text_rsp *);
extern void iscsit_build_reject(struct iscsi_cmd *, struct iscsi_conn *,
struct iscsi_reject *);
extern int iscsit_build_logout_rsp(struct iscsi_cmd *, struct iscsi_conn *,
@ -66,6 +72,10 @@ extern int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
* From iscsi_target_device.c
*/
extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
/*
* From iscsi_target_erl0.c
*/
extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
/*
* From iscsi_target_erl1.c
*/
@ -80,4 +90,5 @@ extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
* From iscsi_target_util.c
*/
extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *, __be32);
extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *,
unsigned char *, __be32);
@ -218,14 +218,11 @@ enum tcm_tmreq_table {
/* fabric independent task management response values */
enum tcm_tmrsp_table {
TMR_FUNCTION_COMPLETE = 0,
TMR_TASK_DOES_NOT_EXIST = 1,
TMR_LUN_DOES_NOT_EXIST = 2,
TMR_TASK_STILL_ALLEGIANT = 3,
TMR_TASK_FAILOVER_NOT_SUPPORTED = 4,
TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED = 5,
TMR_FUNCTION_AUTHORIZATION_FAILED = 6,
TMR_FUNCTION_REJECTED = 255,
TMR_FUNCTION_COMPLETE = 1,
TMR_TASK_DOES_NOT_EXIST = 2,
TMR_LUN_DOES_NOT_EXIST = 3,
TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED = 4,
TMR_FUNCTION_REJECTED = 5,
};
/*
@ -339,8 +336,6 @@ struct t10_pr_registration {
/* Used during APTPL metadata reading */
#define PR_APTPL_MAX_TPORT_LEN 256
unsigned char pr_tport[PR_APTPL_MAX_TPORT_LEN];
/* For writing out live meta data */
unsigned char *pr_aptpl_buf;
u16 pr_aptpl_rpti;
u16 pr_reg_tpgt;
/* Reservation effects all target ports */
@ -374,9 +369,7 @@ struct t10_reservation {
/* Activate Persistence across Target Power Loss enabled
* for SCSI device */
int pr_aptpl_active;
/* Used by struct t10_reservation->pr_aptpl_buf_len */
#define PR_APTPL_BUF_LEN 8192
u32 pr_aptpl_buf_len;
u32 pr_generation;
spinlock_t registration_lock;
spinlock_t aptpl_reg_lock;
@ -424,8 +417,6 @@ struct se_cmd {
int sam_task_attr;
/* Transport protocol dependent state, see transport_state_table */
enum transport_state_table t_state;
/* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */
unsigned check_release:1;
unsigned cmd_wait_set:1;
unsigned unknown_data_length:1;
/* See se_cmd_flags_table */
@ -458,7 +449,6 @@ struct se_cmd {
unsigned char *t_task_cdb;
unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
unsigned long long t_task_lba;
atomic_t t_fe_count;
unsigned int transport_state;
#define CMD_T_ABORTED (1 << 0)
#define CMD_T_ACTIVE (1 << 1)
@ -802,11 +792,12 @@ struct se_portal_group {
struct target_core_fabric_ops *se_tpg_tfo;
struct se_wwn *se_tpg_wwn;
struct config_group tpg_group;
struct config_group *tpg_default_groups[6];
struct config_group *tpg_default_groups[7];
struct config_group tpg_lun_group;
struct config_group tpg_np_group;
struct config_group tpg_acl_group;
struct config_group tpg_attrib_group;
struct config_group tpg_auth_group;
struct config_group tpg_param_group;
};
@ -23,6 +23,7 @@ struct target_fabric_configfs_template {
struct config_item_type tfc_tpg_np_cit;
struct config_item_type tfc_tpg_np_base_cit;
struct config_item_type tfc_tpg_attrib_cit;
struct config_item_type tfc_tpg_auth_cit;
struct config_item_type tfc_tpg_param_cit;
struct config_item_type tfc_tpg_nacl_cit;
struct config_item_type tfc_tpg_nacl_base_cit;
@ -61,7 +61,7 @@ struct target_core_fabric_ops {
int (*get_cmd_state)(struct se_cmd *);
int (*queue_data_in)(struct se_cmd *);
int (*queue_status)(struct se_cmd *);
int (*queue_tm_rsp)(struct se_cmd *);
void (*queue_tm_rsp)(struct se_cmd *);
/*
* fabric module calls for target_core_fabric_configfs.c
*/
@ -62,6 +62,17 @@ static struct target_fabric_tpg_attrib_attribute _fabric##_tpg_attrib_##_name =
_fabric##_tpg_attrib_show_##_name, \
_fabric##_tpg_attrib_store_##_name);
CONFIGFS_EATTR_STRUCT(target_fabric_tpg_auth, se_portal_group);
#define TF_TPG_AUTH_ATTR(_fabric, _name, _mode) \
static struct target_fabric_tpg_auth_attribute _fabric##_tpg_auth_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
_fabric##_tpg_auth_show_##_name, \
_fabric##_tpg_auth_store_##_name);
#define TF_TPG_AUTH_ATTR_RO(_fabric, _name) \
static struct target_fabric_tpg_auth_attribute _fabric##_tpg_auth_##_name = \
__CONFIGFS_EATTR_RO(_name, \
_fabric##_tpg_auth_show_##_name);
CONFIGFS_EATTR_STRUCT(target_fabric_tpg_param, se_portal_group);
#define TF_TPG_PARAM_ATTR(_fabric, _name, _mode) \
@ -0,0 +1,214 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM target
#if !defined(_TRACE_TARGET_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_TARGET_H
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
/* cribbed verbatim from <trace/event/scsi.h> */
#define scsi_opcode_name(opcode) { opcode, #opcode }
#define show_opcode_name(val) \
__print_symbolic(val, \
scsi_opcode_name(TEST_UNIT_READY), \
scsi_opcode_name(REZERO_UNIT), \
scsi_opcode_name(REQUEST_SENSE), \
scsi_opcode_name(FORMAT_UNIT), \
scsi_opcode_name(READ_BLOCK_LIMITS), \
scsi_opcode_name(REASSIGN_BLOCKS), \
scsi_opcode_name(INITIALIZE_ELEMENT_STATUS), \
scsi_opcode_name(READ_6), \
scsi_opcode_name(WRITE_6), \
scsi_opcode_name(SEEK_6), \
scsi_opcode_name(READ_REVERSE), \
scsi_opcode_name(WRITE_FILEMARKS), \
scsi_opcode_name(SPACE), \
scsi_opcode_name(INQUIRY), \
scsi_opcode_name(RECOVER_BUFFERED_DATA), \
scsi_opcode_name(MODE_SELECT), \
scsi_opcode_name(RESERVE), \
scsi_opcode_name(RELEASE), \
scsi_opcode_name(COPY), \
scsi_opcode_name(ERASE), \
scsi_opcode_name(MODE_SENSE), \
scsi_opcode_name(START_STOP), \
scsi_opcode_name(RECEIVE_DIAGNOSTIC), \
scsi_opcode_name(SEND_DIAGNOSTIC), \
scsi_opcode_name(ALLOW_MEDIUM_REMOVAL), \
scsi_opcode_name(SET_WINDOW), \
scsi_opcode_name(READ_CAPACITY), \
scsi_opcode_name(READ_10), \
scsi_opcode_name(WRITE_10), \
scsi_opcode_name(SEEK_10), \
scsi_opcode_name(POSITION_TO_ELEMENT), \
scsi_opcode_name(WRITE_VERIFY), \
scsi_opcode_name(VERIFY), \
scsi_opcode_name(SEARCH_HIGH), \
scsi_opcode_name(SEARCH_EQUAL), \
scsi_opcode_name(SEARCH_LOW), \
scsi_opcode_name(SET_LIMITS), \
scsi_opcode_name(PRE_FETCH), \
scsi_opcode_name(READ_POSITION), \
scsi_opcode_name(SYNCHRONIZE_CACHE), \
scsi_opcode_name(LOCK_UNLOCK_CACHE), \
scsi_opcode_name(READ_DEFECT_DATA), \
scsi_opcode_name(MEDIUM_SCAN), \
scsi_opcode_name(COMPARE), \
scsi_opcode_name(COPY_VERIFY), \
scsi_opcode_name(WRITE_BUFFER), \
scsi_opcode_name(READ_BUFFER), \
scsi_opcode_name(UPDATE_BLOCK), \
scsi_opcode_name(READ_LONG), \
scsi_opcode_name(WRITE_LONG), \
scsi_opcode_name(CHANGE_DEFINITION), \
scsi_opcode_name(WRITE_SAME), \
scsi_opcode_name(UNMAP), \
scsi_opcode_name(READ_TOC), \
scsi_opcode_name(LOG_SELECT), \
scsi_opcode_name(LOG_SENSE), \
scsi_opcode_name(XDWRITEREAD_10), \
scsi_opcode_name(MODE_SELECT_10), \
scsi_opcode_name(RESERVE_10), \
scsi_opcode_name(RELEASE_10), \
scsi_opcode_name(MODE_SENSE_10), \
scsi_opcode_name(PERSISTENT_RESERVE_IN), \
scsi_opcode_name(PERSISTENT_RESERVE_OUT), \
scsi_opcode_name(VARIABLE_LENGTH_CMD), \
scsi_opcode_name(REPORT_LUNS), \
scsi_opcode_name(MAINTENANCE_IN), \
scsi_opcode_name(MAINTENANCE_OUT), \
scsi_opcode_name(MOVE_MEDIUM), \
scsi_opcode_name(EXCHANGE_MEDIUM), \
scsi_opcode_name(READ_12), \
scsi_opcode_name(WRITE_12), \
scsi_opcode_name(WRITE_VERIFY_12), \
scsi_opcode_name(SEARCH_HIGH_12), \
scsi_opcode_name(SEARCH_EQUAL_12), \
scsi_opcode_name(SEARCH_LOW_12), \
scsi_opcode_name(READ_ELEMENT_STATUS), \
scsi_opcode_name(SEND_VOLUME_TAG), \
scsi_opcode_name(WRITE_LONG_2), \
scsi_opcode_name(READ_16), \
scsi_opcode_name(WRITE_16), \
scsi_opcode_name(VERIFY_16), \
scsi_opcode_name(WRITE_SAME_16), \
scsi_opcode_name(SERVICE_ACTION_IN), \
scsi_opcode_name(SAI_READ_CAPACITY_16), \
scsi_opcode_name(SAI_GET_LBA_STATUS), \
scsi_opcode_name(MI_REPORT_TARGET_PGS), \
scsi_opcode_name(MO_SET_TARGET_PGS), \
scsi_opcode_name(READ_32), \
scsi_opcode_name(WRITE_32), \
scsi_opcode_name(WRITE_SAME_32), \
scsi_opcode_name(ATA_16), \
scsi_opcode_name(ATA_12))
#define show_task_attribute_name(val) \
__print_symbolic(val, \
{ MSG_SIMPLE_TAG, "SIMPLE" }, \
{ MSG_HEAD_TAG, "HEAD" }, \
{ MSG_ORDERED_TAG, "ORDERED" }, \
{ MSG_ACA_TAG, "ACA" } )
#define show_scsi_status_name(val) \
__print_symbolic(val, \
{ SAM_STAT_GOOD, "GOOD" }, \
{ SAM_STAT_CHECK_CONDITION, "CHECK CONDITION" }, \
{ SAM_STAT_CONDITION_MET, "CONDITION MET" }, \
{ SAM_STAT_BUSY, "BUSY" }, \
{ SAM_STAT_INTERMEDIATE, "INTERMEDIATE" }, \
{ SAM_STAT_INTERMEDIATE_CONDITION_MET, "INTERMEDIATE CONDITION MET" }, \
{ SAM_STAT_RESERVATION_CONFLICT, "RESERVATION CONFLICT" }, \
{ SAM_STAT_COMMAND_TERMINATED, "COMMAND TERMINATED" }, \
{ SAM_STAT_TASK_SET_FULL, "TASK SET FULL" }, \
{ SAM_STAT_ACA_ACTIVE, "ACA ACTIVE" }, \
{ SAM_STAT_TASK_ABORTED, "TASK ABORTED" } )
TRACE_EVENT(target_sequencer_start,
TP_PROTO(struct se_cmd *cmd),
TP_ARGS(cmd),
TP_STRUCT__entry(
__field( unsigned int, unpacked_lun )
__field( unsigned int, opcode )
__field( unsigned int, data_length )
__field( unsigned int, task_attribute )
__array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE )
__string( initiator, cmd->se_sess->se_node_acl->initiatorname )
),
TP_fast_assign(
__entry->unpacked_lun = cmd->se_lun->unpacked_lun;
__entry->opcode = cmd->t_task_cdb[0];
__entry->data_length = cmd->data_length;
__entry->task_attribute = cmd->sam_task_attr;
memcpy(__entry->cdb, cmd->t_task_cdb, TCM_MAX_COMMAND_SIZE);
__assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname);
),
TP_printk("%s -> LUN %03u %s data_length %6u CDB %s (TA:%s C:%02x)",
__get_str(initiator), __entry->unpacked_lun,
show_opcode_name(__entry->opcode),
__entry->data_length, __print_hex(__entry->cdb, 16),
show_task_attribute_name(__entry->task_attribute),
scsi_command_size(__entry->cdb) <= 16 ?
__entry->cdb[scsi_command_size(__entry->cdb) - 1] :
__entry->cdb[1]
)
);
TRACE_EVENT(target_cmd_complete,
TP_PROTO(struct se_cmd *cmd),
TP_ARGS(cmd),
TP_STRUCT__entry(
__field( unsigned int, unpacked_lun )
__field( unsigned int, opcode )
__field( unsigned int, data_length )
__field( unsigned int, task_attribute )
__field( unsigned char, scsi_status )
__field( unsigned char, sense_length )
__array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE )
__array( unsigned char, sense_data, 18 )
__string(initiator, cmd->se_sess->se_node_acl->initiatorname)
),
TP_fast_assign(
__entry->unpacked_lun = cmd->se_lun->unpacked_lun;
__entry->opcode = cmd->t_task_cdb[0];
__entry->data_length = cmd->data_length;
__entry->task_attribute = cmd->sam_task_attr;
__entry->scsi_status = cmd->scsi_status;
__entry->sense_length = cmd->scsi_status == SAM_STAT_CHECK_CONDITION ?
min(18, ((u8 *) cmd->sense_buffer)[SPC_ADD_SENSE_LEN_OFFSET] + 8) : 0;
memcpy(__entry->cdb, cmd->t_task_cdb, TCM_MAX_COMMAND_SIZE);
memcpy(__entry->sense_data, cmd->sense_buffer, __entry->sense_length);
__assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname);
),
TP_printk("%s <- LUN %03u status %s (sense len %d%s%s) %s data_length %6u CDB %s (TA:%s C:%02x)",
__get_str(initiator), __entry->unpacked_lun,
show_scsi_status_name(__entry->scsi_status),
__entry->sense_length, __entry->sense_length ? " / " : "",
__print_hex(__entry->sense_data, __entry->sense_length),
show_opcode_name(__entry->opcode),
__entry->data_length, __print_hex(__entry->cdb, 16),
show_task_attribute_name(__entry->task_attribute),
scsi_command_size(__entry->cdb) <= 16 ?
__entry->cdb[scsi_command_size(__entry->cdb) - 1] :
__entry->cdb[1]
)
);
#endif /* _TRACE_TARGET_H */
/* This part must be outside protection */
#include <trace/define_trace.h>