target: pass sense_reason as a return value

Pass the sense reason as an explicit return value from the I/O submission
path instead of storing it in struct se_cmd and using negative return
values.  This cleans up a lot of the code paths and, with the sparse
annotations for the new sense_reason_t type, allows for much better
error checking.

(nab: Convert spc_emulate_modesense + spc_emulate_modeselect to use
      sense_reason_t with Roland's MODE SELECT changes)

Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Roland Dreier <roland@purestorage.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Christoph Hellwig authored 2012-11-06 12:24:09 -08:00, committed by Nicholas Bellinger
parent fecae40abb
commit de103c93af
24 changed files with 856 additions and 1074 deletions

@ -1730,7 +1730,7 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
uint64_t unpacked_lun;
u64 data_len;
enum dma_data_direction dir;
int ret;
sense_reason_t ret;
BUG_ON(!send_ioctx);
@ -1755,12 +1755,10 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
break;
}
ret = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len);
if (ret) {
if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) {
printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n",
srp_cmd->tag);
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
ret = TCM_INVALID_CDB_FIELD;
kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
goto send_sense;
}
@ -1769,17 +1767,18 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
cmd->data_direction = dir;
unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
sizeof(srp_cmd->lun));
if (transport_lookup_cmd_lun(cmd, unpacked_lun) < 0) {
ret = transport_lookup_cmd_lun(cmd, unpacked_lun);
if (ret) {
kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
goto send_sense;
}
ret = target_setup_cmd_from_cdb(cmd, srp_cmd->cdb);
if (ret < 0) {
if (ret) {
kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) {
if (ret == TCM_RESERVATION_CONFLICT) {
srpt_queue_status(cmd);
return 0;
} else
}
goto send_sense;
}
@ -1787,8 +1786,7 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
return 0;
send_sense:
transport_send_check_condition_and_sense(cmd, cmd->scsi_sense_reason,
0);
transport_send_check_condition_and_sense(cmd, ret, 0);
return -1;
}
@ -1882,16 +1880,14 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
send_ioctx->tag = srp_tsk->tag;
tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
if (tcm_tmr < 0) {
send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
send_ioctx->cmd.se_tmr_req->response =
TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
goto process_tmr;
goto fail;
}
res = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
if (res < 0) {
send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
goto process_tmr;
goto fail;
}
unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
@ -1899,22 +1895,19 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
res = transport_lookup_tmr_lun(&send_ioctx->cmd, unpacked_lun);
if (res) {
pr_debug("rejecting TMR for LUN %lld\n", unpacked_lun);
send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
send_ioctx->cmd.se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
goto process_tmr;
goto fail;
}
if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK)
srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
process_tmr:
kref_get(&send_ioctx->kref);
if (!(send_ioctx->cmd.se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
transport_generic_handle_tmr(&send_ioctx->cmd);
else
transport_send_check_condition_and_sense(cmd,
cmd->scsi_sense_reason, 0);
return;
fail:
kref_get(&send_ioctx->kref);
transport_send_check_condition_and_sense(cmd, 0, 0); // XXX:
}
/**

@ -620,8 +620,8 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
return;
}
cmd->se_cmd.scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
transport_generic_request_failure(&cmd->se_cmd);
transport_generic_request_failure(&cmd->se_cmd,
TCM_CHECK_CONDITION_ABORT_CMD);
return;
}

@ -767,8 +767,7 @@ static int iscsit_handle_scsi_cmd(
struct iscsi_conn *conn,
unsigned char *buf)
{
int data_direction, cmdsn_ret = 0, immed_ret, ret, transport_ret;
int dump_immediate_data = 0, send_check_condition = 0, payload_length;
int data_direction, payload_length, cmdsn_ret = 0, immed_ret;
struct iscsi_cmd *cmd = NULL;
struct iscsi_scsi_req *hdr;
int iscsi_task_attr;
@ -956,35 +955,23 @@ done:
" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
hdr->cmdsn, hdr->data_length, payload_length, conn->cid);
/*
* The CDB is going to an se_device_t.
*/
ret = transport_lookup_cmd_lun(&cmd->se_cmd,
cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
scsilun_to_int(&hdr->lun));
if (ret < 0) {
if (cmd->se_cmd.scsi_sense_reason == TCM_NON_EXISTENT_LUN) {
pr_debug("Responding to non-acl'ed,"
" non-existent or non-exported iSCSI LUN:"
" 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
}
send_check_condition = 1;
if (cmd->sense_reason)
goto attach_cmd;
}
transport_ret = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
if (transport_ret == -ENOMEM) {
cmd->sense_reason = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
if (cmd->sense_reason) {
if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
return iscsit_add_reject_from_cmd(
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1, 1, buf, cmd);
} else if (transport_ret < 0) {
/*
* Unsupported SAM Opcode. CHECK_CONDITION will be sent
* in iscsit_execute_cmd() during the CmdSN OOO Execution
* Mechinism.
*/
send_check_condition = 1;
} else {
if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0)
}
goto attach_cmd;
}
if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) {
return iscsit_add_reject_from_cmd(
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1, 1, buf, cmd);
@ -1000,11 +987,12 @@ attach_cmd:
*/
core_alua_check_nonop_delay(&cmd->se_cmd);
ret = iscsit_allocate_iovecs(cmd);
if (ret < 0)
if (iscsit_allocate_iovecs(cmd) < 0) {
return iscsit_add_reject_from_cmd(
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1, 0, buf, cmd);
}
/*
* Check the CmdSN against ExpCmdSN/MaxCmdSN here if
* the Immediate Bit is not set, and no Immediate
@ -1031,10 +1019,7 @@ attach_cmd:
* If no Immediate Data is attached, it's OK to return now.
*/
if (!cmd->immediate_data) {
if (send_check_condition)
return 0;
if (cmd->unsolicited_data) {
if (!cmd->sense_reason && cmd->unsolicited_data) {
iscsit_set_dataout_sequence_values(cmd);
spin_lock_bh(&cmd->dataout_timeout_lock);
@ -1050,19 +1035,17 @@ attach_cmd:
* thread. They are processed in CmdSN order by
* iscsit_check_received_cmdsn() below.
*/
if (send_check_condition) {
if (cmd->sense_reason) {
immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
dump_immediate_data = 1;
goto after_immediate_data;
}
/*
* Call directly into transport_generic_new_cmd() to perform
* the backend memory allocation.
*/
ret = transport_generic_new_cmd(&cmd->se_cmd);
if (ret < 0) {
cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd);
if (cmd->sense_reason) {
immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
dump_immediate_data = 1;
goto after_immediate_data;
}
@ -1079,7 +1062,7 @@ after_immediate_data:
* Special case for Unsupported SAM WRITE Opcodes
* and ImmediateData=Yes.
*/
if (dump_immediate_data) {
if (cmd->sense_reason) {
if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
return -1;
} else if (cmd->unsolicited_data) {
@ -1272,8 +1255,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
spin_lock_irqsave(&se_cmd->t_state_lock, flags);
if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
(se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE))
dump_unsolicited_data = 1;
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@ -1742,7 +1724,6 @@ static int iscsit_handle_task_mgt_cmd(
ret = transport_lookup_tmr_lun(&cmd->se_cmd,
scsilun_to_int(&hdr->lun));
if (ret < 0) {
cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
goto attach;
}
@ -1751,10 +1732,8 @@ static int iscsit_handle_task_mgt_cmd(
switch (function) {
case ISCSI_TM_FUNC_ABORT_TASK:
se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE) {
cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
if (se_tmr->response)
goto attach;
}
break;
case ISCSI_TM_FUNC_ABORT_TASK_SET:
case ISCSI_TM_FUNC_CLEAR_ACA:
@ -1763,14 +1742,12 @@ static int iscsit_handle_task_mgt_cmd(
break;
case ISCSI_TM_FUNC_TARGET_WARM_RESET:
if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
goto attach;
}
break;
case ISCSI_TM_FUNC_TARGET_COLD_RESET:
if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
goto attach;
}
@ -1781,7 +1758,7 @@ static int iscsit_handle_task_mgt_cmd(
* Perform sanity checks on the ExpDataSN only if the
* TASK_REASSIGN was successful.
*/
if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE)
if (se_tmr->response)
break;
if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
@ -1792,7 +1769,6 @@ static int iscsit_handle_task_mgt_cmd(
default:
pr_err("Unknown TMR function: 0x%02x, protocol"
" error.\n", function);
cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED;
goto attach;
}

@ -474,7 +474,7 @@ struct iscsi_cmd {
struct scatterlist *first_data_sg;
u32 first_data_sg_off;
u32 kmapped_nents;
sense_reason_t sense_reason;
} ____cacheline_aligned;
struct iscsi_tmr_req {

@ -929,11 +929,10 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
case ISCSI_OP_SCSI_CMD:
/*
* Go ahead and send the CHECK_CONDITION status for
* any SCSI CDB exceptions that may have occurred, also
* handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
* any SCSI CDB exceptions that may have occurred.
*/
if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
if (cmd->sense_reason) {
if (cmd->sense_reason == TCM_RESERVATION_CONFLICT) {
cmd->i_state = ISTATE_SEND_STATUS;
spin_unlock_bh(&cmd->istate_lock);
iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
@ -956,7 +955,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
* exception
*/
return transport_send_check_condition_and_sense(se_cmd,
se_cmd->scsi_sense_reason, 0);
cmd->sense_reason, 0);
}
/*
* Special case for delayed CmdSN with Immediate
@ -1013,7 +1012,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
break;
case ISCSI_OP_SCSI_TMFUNC:
if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
if (cmd->se_cmd.se_tmr_req->response) {
spin_unlock_bh(&cmd->istate_lock);
iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
cmd->i_state);

@ -41,7 +41,7 @@
#include "target_core_alua.h"
#include "target_core_ua.h"
static int core_alua_check_transition(int state, int *primary);
static sense_reason_t core_alua_check_transition(int state, int *primary);
static int core_alua_set_tg_pt_secondary_state(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
struct se_port *port, int explict, int offline);
@ -59,7 +59,8 @@ struct t10_alua_lu_gp *default_lu_gp;
*
* See spc4r17 section 6.27
*/
int target_emulate_report_target_port_groups(struct se_cmd *cmd)
sense_reason_t
target_emulate_report_target_port_groups(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_port *port;
@ -68,6 +69,7 @@ int target_emulate_report_target_port_groups(struct se_cmd *cmd)
unsigned char *buf;
u32 rd_len = 0, off;
int ext_hdr = (cmd->t_task_cdb[1] & 0x20);
/*
* Skip over RESERVED area to first Target port group descriptor
* depending on the PARAMETER DATA FORMAT type..
@ -81,10 +83,11 @@ int target_emulate_report_target_port_groups(struct se_cmd *cmd)
pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
" small for %s header\n", cmd->data_length,
(ext_hdr) ? "extended" : "normal");
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
return TCM_INVALID_CDB_FIELD;
}
buf = transport_kmap_data_sg(cmd);
if (!buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
@ -200,7 +203,8 @@ int target_emulate_report_target_port_groups(struct se_cmd *cmd)
*
* See spc4r17 section 6.35
*/
int target_emulate_set_target_port_groups(struct se_cmd *cmd)
sense_reason_t
target_emulate_set_target_port_groups(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_port *port, *l_port = cmd->se_lun->lun_sep;
@ -209,22 +213,23 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
unsigned char *buf;
unsigned char *ptr;
sense_reason_t rc;
u32 len = 4; /* Skip over RESERVED area in header */
int alua_access_state, primary = 0, rc;
int alua_access_state, primary = 0;
u16 tg_pt_id, rtpi;
if (!l_port) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -EINVAL;
}
if (!l_port)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
if (cmd->data_length < 4) {
pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
" small\n", cmd->data_length);
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
return -EINVAL;
return TCM_INVALID_PARAMETER_LIST;
}
buf = transport_kmap_data_sg(cmd);
if (!buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/*
* Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed
@ -233,8 +238,7 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
if (!l_tg_pt_gp_mem) {
pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
rc = -EINVAL;
rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
@ -242,24 +246,22 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
if (!l_tg_pt_gp) {
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
rc = -EINVAL;
rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
if (!rc) {
if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)) {
pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
" while TPGS_EXPLICT_ALUA is disabled\n");
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
rc = -EINVAL;
rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
ptr = &buf[4]; /* Skip over RESERVED area in header */
while (len < cmd->data_length) {
bool found = false;
alua_access_state = (ptr[0] & 0x0f);
/*
* Check the received ALUA access state, and determine if
@ -267,7 +269,7 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
* access state.
*/
rc = core_alua_check_transition(alua_access_state, &primary);
if (rc != 0) {
if (rc) {
/*
* If the SET TARGET PORT GROUPS attempts to establish
* an invalid combination of target port asymmetric
@ -278,11 +280,9 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
* REQUEST, and the additional sense code set to INVALID
* FIELD IN PARAMETER LIST.
*/
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
rc = -EINVAL;
goto out;
}
rc = -1;
/*
* If the ASYMMETRIC ACCESS STATE field (see table 267)
* specifies a primary target port asymmetric access state,
@ -314,11 +314,13 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_inc();
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
rc = core_alua_do_port_transition(tg_pt_gp,
if (!core_alua_do_port_transition(tg_pt_gp,
dev, l_port, nacl,
alua_access_state, 1);
alua_access_state, 1))
found = true;
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
@ -326,15 +328,6 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
break;
}
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
/*
* If not matching target port group ID can be located
* throw an exception with ASCQ: INVALID_PARAMETER_LIST
*/
if (rc != 0) {
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
rc = -EINVAL;
goto out;
}
} else {
/*
* Extact the RELATIVE TARGET PORT IDENTIFIER to identify
@ -353,25 +346,22 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
continue;
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
spin_unlock(&dev->se_port_lock);
rc = core_alua_set_tg_pt_secondary_state(
tg_pt_gp_mem, port, 1, 1);
if (!core_alua_set_tg_pt_secondary_state(
tg_pt_gp_mem, port, 1, 1))
found = true;
spin_lock(&dev->se_port_lock);
break;
}
spin_unlock(&dev->se_port_lock);
/*
* If not matching relative target port identifier can
* be located, throw an exception with ASCQ:
* INVALID_PARAMETER_LIST
*/
if (rc != 0) {
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
rc = -EINVAL;
goto out;
}
if (!found) {
rc = TCM_INVALID_PARAMETER_LIST;
goto out;
}
ptr += 4;
@ -526,7 +516,8 @@ static inline int core_alua_state_transition(
* return 0: Used to signal success
* reutrn -1: Used to signal failure, and invalid cdb field
*/
int target_alua_state_check(struct se_cmd *cmd)
sense_reason_t
target_alua_state_check(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
unsigned char *cdb = cmd->t_task_cdb;
@ -599,8 +590,7 @@ int target_alua_state_check(struct se_cmd *cmd)
default:
pr_err("Unknown ALUA access state: 0x%02x\n",
out_alua_state);
ret = -EINVAL;
break;
return TCM_INVALID_CDB_FIELD;
}
out:
@ -617,15 +607,17 @@ out:
cmd->scsi_asc = 0x04;
cmd->scsi_ascq = alua_ascq;
return TCM_CHECK_CONDITION_NOT_READY;
}
return ret;
return 0;
}
/*
* Check implict and explict ALUA state change request.
*/
static int core_alua_check_transition(int state, int *primary)
static sense_reason_t
core_alua_check_transition(int state, int *primary)
{
switch (state) {
case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
@ -647,7 +639,7 @@ static int core_alua_check_transition(int state, int *primary)
break;
default:
pr_err("Unknown ALUA access state: 0x%02x\n", state);
return -EINVAL;
return TCM_INVALID_PARAMETER_LIST;
}
return 0;

@ -72,8 +72,8 @@ extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
extern int target_emulate_report_target_port_groups(struct se_cmd *);
extern int target_emulate_set_target_port_groups(struct se_cmd *);
extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *);
extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
extern int core_alua_check_nonop_delay(struct se_cmd *);
extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
struct se_device *, struct se_port *,
@ -132,6 +132,6 @@ extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *,
extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *,
const char *, size_t);
extern int core_setup_alua(struct se_device *);
extern int target_alua_state_check(struct se_cmd *cmd);
extern sense_reason_t target_alua_state_check(struct se_cmd *cmd);
#endif /* TARGET_CORE_ALUA_H */

@ -54,18 +54,16 @@ static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;
int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
struct se_lun *se_lun = NULL;
struct se_session *se_sess = se_cmd->se_sess;
struct se_device *dev;
unsigned long flags;
if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
return -ENODEV;
}
if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
return TCM_NON_EXISTENT_LUN;
spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
@ -77,14 +75,12 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
(deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
" Access for 0x%08x\n",
se_cmd->se_tfo->get_fabric_name(),
unpacked_lun);
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
return -EACCES;
return TCM_WRITE_PROTECTED;
}
if (se_cmd->data_direction == DMA_TO_DEVICE)
@ -109,23 +105,18 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
* MappedLUN=0 exists for this Initiator Port.
*/
if (unpacked_lun != 0) {
se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
" Access for 0x%08x\n",
se_cmd->se_tfo->get_fabric_name(),
unpacked_lun);
return -ENODEV;
return TCM_NON_EXISTENT_LUN;
}
/*
* Force WRITE PROTECT for virtual LUN 0
*/
if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
(se_cmd->data_direction != DMA_NONE)) {
se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
return -EACCES;
}
(se_cmd->data_direction != DMA_NONE))
return TCM_WRITE_PROTECTED;
se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
@ -162,11 +153,8 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
unsigned long flags;
if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
return -ENODEV;
}
spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
@ -186,7 +174,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
" Access for 0x%08x\n",
se_cmd->se_tfo->get_fabric_name(),
unpacked_lun);
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
return -ENODEV;
}

@ -309,7 +309,8 @@ static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
return 1;
}
static int fd_execute_sync_cache(struct se_cmd *cmd)
static sense_reason_t
fd_execute_sync_cache(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct fd_dev *fd_dev = FD_DEV(dev);
@ -345,17 +346,16 @@ static int fd_execute_sync_cache(struct se_cmd *cmd)
if (immed)
return 0;
if (ret) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
if (ret)
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
} else {
else
target_complete_cmd(cmd, SAM_STAT_GOOD);
}
return 0;
}
static int fd_execute_rw(struct se_cmd *cmd)
static sense_reason_t
fd_execute_rw(struct se_cmd *cmd)
{
struct scatterlist *sgl = cmd->t_data_sg;
u32 sgl_nents = cmd->t_data_nents;
@ -388,10 +388,9 @@ static int fd_execute_rw(struct se_cmd *cmd)
}
}
if (ret < 0) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return ret;
}
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
if (ret)
target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
@ -515,7 +514,8 @@ static struct sbc_ops fd_sbc_ops = {
.execute_sync_cache = fd_execute_sync_cache,
};
static int fd_parse_cdb(struct se_cmd *cmd)
static sense_reason_t
fd_parse_cdb(struct se_cmd *cmd)
{
return sbc_parse_cdb(cmd, &fd_sbc_ops);
}

@ -261,14 +261,11 @@ static void iblock_end_io_flush(struct bio *bio, int err)
pr_err("IBLOCK: cache flush failed: %d\n", err);
if (cmd) {
if (err) {
cmd->scsi_sense_reason =
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
if (err)
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
} else {
else
target_complete_cmd(cmd, SAM_STAT_GOOD);
}
}
bio_put(bio);
}
@ -277,7 +274,8 @@ static void iblock_end_io_flush(struct bio *bio, int err)
* Implement SYCHRONIZE CACHE. Note that we can't handle lba ranges and must
* always flush the whole cache.
*/
static int iblock_execute_sync_cache(struct se_cmd *cmd)
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
int immed = (cmd->t_task_cdb[1] & 0x2);
@ -299,7 +297,8 @@ static int iblock_execute_sync_cache(struct se_cmd *cmd)
return 0;
}
static int iblock_execute_unmap(struct se_cmd *cmd)
static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
@ -307,17 +306,18 @@ static int iblock_execute_unmap(struct se_cmd *cmd)
sector_t lba;
int size;
u32 range;
int ret = 0;
int dl, bd_dl;
sense_reason_t ret = 0;
int dl, bd_dl, err;
if (cmd->data_length < 8) {
pr_warn("UNMAP parameter list length %u too small\n",
cmd->data_length);
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
return -EINVAL;
return TCM_INVALID_PARAMETER_LIST;
}
buf = transport_kmap_data_sg(cmd);
if (!buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
dl = get_unaligned_be16(&buf[0]);
bd_dl = get_unaligned_be16(&buf[2]);
@ -330,8 +330,7 @@ static int iblock_execute_unmap(struct se_cmd *cmd)
size = bd_dl;
if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
ret = -EINVAL;
ret = TCM_INVALID_PARAMETER_LIST;
goto err;
}
@ -347,22 +346,21 @@ static int iblock_execute_unmap(struct se_cmd *cmd)
(unsigned long long)lba, range);
if (range > dev->dev_attrib.max_unmap_lba_count) {
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
ret = -EINVAL;
ret = TCM_INVALID_PARAMETER_LIST;
goto err;
}
if (lba + range > dev->transport->get_blocks(dev) + 1) {
cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE;
ret = -EINVAL;
ret = TCM_ADDRESS_OUT_OF_RANGE;
goto err;
}
ret = blkdev_issue_discard(ib_dev->ibd_bd, lba, range,
err = blkdev_issue_discard(ib_dev->ibd_bd, lba, range,
GFP_KERNEL, 0);
if (ret < 0) {
if (err < 0) {
pr_err("blkdev_issue_discard() failed: %d\n",
ret);
err);
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto err;
}
@ -377,7 +375,8 @@ err:
return ret;
}
static int iblock_execute_write_same(struct se_cmd *cmd)
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
int ret;
@ -387,7 +386,7 @@ static int iblock_execute_write_same(struct se_cmd *cmd)
0);
if (ret < 0) {
pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
return ret;
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
target_complete_cmd(cmd, GOOD);
@ -552,7 +551,8 @@ static void iblock_submit_bios(struct bio_list *list, int rw)
blk_finish_plug(&plug);
}
static int iblock_execute_rw(struct se_cmd *cmd)
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd)
{
struct scatterlist *sgl = cmd->t_data_sg;
u32 sgl_nents = cmd->t_data_nents;
@ -598,8 +598,7 @@ static int iblock_execute_rw(struct se_cmd *cmd)
else {
pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
" %u\n", dev->dev_attrib.block_size);
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOSYS;
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
@ -659,9 +658,8 @@ fail_put_bios:
bio_put(bio);
fail_free_ibr:
kfree(ibr);
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
fail:
return -ENOMEM;
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
static sector_t iblock_get_blocks(struct se_device *dev)
@ -706,7 +704,8 @@ static struct sbc_ops iblock_sbc_ops = {
.execute_unmap = iblock_execute_unmap,
};
static int iblock_parse_cdb(struct se_cmd *cmd)
static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

@ -101,7 +101,7 @@ int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
int transport_clear_lun_from_sessions(struct se_lun *);
void transport_send_task_abort(struct se_cmd *);
int target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
void target_qf_do_work(struct work_struct *work);
/* target_core_stat.c */

(Diff for one file suppressed because it is too large to display.)

@ -47,8 +47,8 @@ extern struct kmem_cache *t10_pr_reg_cache;
extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
char *, u32);
extern int target_scsi2_reservation_release(struct se_cmd *);
extern int target_scsi2_reservation_reserve(struct se_cmd *);
extern sense_reason_t target_scsi2_reservation_release(struct se_cmd *);
extern sense_reason_t target_scsi2_reservation_reserve(struct se_cmd *);
extern int core_scsi3_alloc_aptpl_registration(
struct t10_reservation *, u64,
unsigned char *, unsigned char *, u32,
@ -61,8 +61,8 @@ extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
extern void core_scsi3_free_all_registrations(struct se_device *);
extern unsigned char *core_scsi3_pr_dump_type(int);
extern int target_scsi3_emulate_pr_in(struct se_cmd *);
extern int target_scsi3_emulate_pr_out(struct se_cmd *);
extern int target_check_reservation(struct se_cmd *cmd);
extern sense_reason_t target_scsi3_emulate_pr_in(struct se_cmd *);
extern sense_reason_t target_scsi3_emulate_pr_out(struct se_cmd *);
extern sense_reason_t target_check_reservation(struct se_cmd *);
#endif /* TARGET_CORE_PR_H */

@ -60,7 +60,7 @@ static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
static struct se_subsystem_api pscsi_template;
static int pscsi_execute_cmd(struct se_cmd *cmd);
static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
static void pscsi_req_done(struct request *, int);
/* pscsi_attach_hba():
@ -642,7 +642,11 @@ static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
(status_byte(result) << 1) == SAM_STAT_GOOD) {
if (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) {
unsigned char *buf = transport_kmap_data_sg(cmd);
unsigned char *buf;
buf = transport_kmap_data_sg(cmd);
if (!buf)
; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
if (cdb[0] == MODE_SENSE_10) {
if (!(buf[3] & 0x80))
@ -856,9 +860,9 @@ static inline struct bio *pscsi_get_bio(int sg_num)
return bio;
}
static int pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl,
u32 sgl_nents, enum dma_data_direction data_direction,
struct bio **hbio)
static sense_reason_t
pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
enum dma_data_direction data_direction, struct bio **hbio)
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
struct bio *bio = NULL, *tbio = NULL;
@ -946,7 +950,7 @@ static int pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl,
}
}
return sgl_nents;
return 0;
fail:
while (*hbio) {
bio = *hbio;
@ -954,8 +958,7 @@ fail:
bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOMEM;
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
@ -982,15 +985,13 @@ static inline void pscsi_clear_cdb_lun(unsigned char *cdb)
}
}
static int pscsi_parse_cdb(struct se_cmd *cmd)
static sense_reason_t
pscsi_parse_cdb(struct se_cmd *cmd)
{
unsigned char *cdb = cmd->t_task_cdb;
if (cmd->se_cmd_flags & SCF_BIDI) {
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
return -EINVAL;
}
if (cmd->se_cmd_flags & SCF_BIDI)
return TCM_UNSUPPORTED_SCSI_OPCODE;
pscsi_clear_cdb_lun(cdb);
@ -1020,7 +1021,8 @@ static int pscsi_parse_cdb(struct se_cmd *cmd)
}
}
static int pscsi_execute_cmd(struct se_cmd *cmd)
static sense_reason_t
pscsi_execute_cmd(struct se_cmd *cmd)
{
struct scatterlist *sgl = cmd->t_data_sg;
u32 sgl_nents = cmd->t_data_nents;
@ -1029,7 +1031,7 @@ static int pscsi_execute_cmd(struct se_cmd *cmd)
struct pscsi_plugin_task *pt;
struct request *req;
struct bio *hbio;
int ret;
sense_reason_t ret;
/*
* Dynamically alloc cdb space, since it may be larger than
@ -1037,8 +1039,7 @@ static int pscsi_execute_cmd(struct se_cmd *cmd)
*/
pt = kzalloc(sizeof(*pt) + scsi_command_size(cmd->t_task_cdb), GFP_KERNEL);
if (!pt) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOMEM;
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
cmd->priv = pt;
@ -1052,24 +1053,21 @@ static int pscsi_execute_cmd(struct se_cmd *cmd)
if (!req || IS_ERR(req)) {
pr_err("PSCSI: blk_get_request() failed: %ld\n",
req ? IS_ERR(req) : -ENOMEM);
cmd->scsi_sense_reason =
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto fail;
}
} else {
BUG_ON(!cmd->data_length);
ret = pscsi_map_sg(cmd, sgl, sgl_nents, data_direction, &hbio);
if (ret < 0) {
cmd->scsi_sense_reason =
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
if (ret)
goto fail;
}
req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
GFP_KERNEL);
if (IS_ERR(req)) {
pr_err("pSCSI: blk_make_request() failed\n");
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto fail_free_bio;
}
}
@ -1100,10 +1098,10 @@ fail_free_bio:
bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
fail:
kfree(pt);
return -ENOMEM;
return ret;
}
/* pscsi_get_device_type():
@ -1152,7 +1150,6 @@ static void pscsi_req_done(struct request *req, int uptodate)
pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
" 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
pt->pscsi_result);
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
break;
}

@ -275,7 +275,8 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
return NULL;
}
static int rd_execute_rw(struct se_cmd *cmd)
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd)
{
struct scatterlist *sgl = cmd->t_data_sg;
u32 sgl_nents = cmd->t_data_nents;
@ -298,7 +299,7 @@ static int rd_execute_rw(struct se_cmd *cmd)
table = rd_get_sg_table(dev, rd_page);
if (!table)
return -EINVAL;
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
rd_sg = &table->sg_table[rd_page - table->page_start_offset];
@ -348,7 +349,7 @@ static int rd_execute_rw(struct se_cmd *cmd)
table = rd_get_sg_table(dev, rd_page);
if (!table) {
sg_miter_stop(&m);
return -EINVAL;
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/* since we increment, the first sg entry is correct */
@ -431,7 +432,8 @@ static struct sbc_ops rd_sbc_ops = {
.execute_rw = rd_execute_rw,
};
static int rd_parse_cdb(struct se_cmd *cmd)
static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
return sbc_parse_cdb(cmd, &rd_sbc_ops);
}

@ -37,7 +37,8 @@
#include "target_core_ua.h"
static int sbc_emulate_readcapacity(struct se_cmd *cmd)
static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
unsigned long long blocks_long = dev->transport->get_blocks(dev);
@ -60,16 +61,18 @@ static int sbc_emulate_readcapacity(struct se_cmd *cmd)
buf[7] = dev->dev_attrib.block_size & 0xff;
rbuf = transport_kmap_data_sg(cmd);
if (rbuf) {
if (!rbuf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
transport_kunmap_data_sg(cmd);
}
target_complete_cmd(cmd, GOOD);
return 0;
}
static int sbc_emulate_readcapacity_16(struct se_cmd *cmd)
static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
unsigned char *rbuf;
@ -97,10 +100,11 @@ static int sbc_emulate_readcapacity_16(struct se_cmd *cmd)
buf[14] = 0x80;
rbuf = transport_kmap_data_sg(cmd);
if (rbuf) {
if (!rbuf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
transport_kunmap_data_sg(cmd);
}
target_complete_cmd(cmd, GOOD);
return 0;
@ -129,7 +133,8 @@ int spc_get_write_same_sectors(struct se_cmd *cmd)
}
EXPORT_SYMBOL(spc_get_write_same_sectors);
static int sbc_emulate_verify(struct se_cmd *cmd)
static sense_reason_t
sbc_emulate_verify(struct se_cmd *cmd)
{
target_complete_cmd(cmd, GOOD);
return 0;
@ -313,13 +318,14 @@ out:
kfree(buf);
}
int sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
struct se_device *dev = cmd->se_dev;
unsigned char *cdb = cmd->t_task_cdb;
unsigned int size;
u32 sectors = 0;
int ret;
sense_reason_t ret;
switch (cdb[0]) {
case READ_6:
@ -378,9 +384,9 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
cmd->execute_cmd = ops->execute_rw;
break;
case XDWRITEREAD_10:
if ((cmd->data_direction != DMA_TO_DEVICE) ||
if (cmd->data_direction != DMA_TO_DEVICE ||
!(cmd->se_cmd_flags & SCF_BIDI))
goto out_invalid_cdb_field;
return TCM_INVALID_CDB_FIELD;
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
@ -419,26 +425,26 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
break;
case WRITE_SAME_32:
if (!ops->execute_write_same)
goto out_unsupported_cdb;
return TCM_UNSUPPORTED_SCSI_OPCODE;
sectors = transport_get_sectors_32(cdb);
if (!sectors) {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
" supported\n");
goto out_invalid_cdb_field;
return TCM_INVALID_CDB_FIELD;
}
size = sbc_get_size(cmd, 1);
cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
if (sbc_write_same_supported(dev, &cdb[10]) < 0)
goto out_unsupported_cdb;
return TCM_UNSUPPORTED_SCSI_OPCODE;
cmd->execute_cmd = ops->execute_write_same;
break;
default:
pr_err("VARIABLE_LENGTH_CMD service action"
" 0x%04x not supported\n", service_action);
goto out_unsupported_cdb;
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
break;
}
@ -454,7 +460,7 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
default:
pr_err("Unsupported SA: 0x%02x\n",
cmd->t_task_cdb[1] & 0x1f);
goto out_invalid_cdb_field;
return TCM_INVALID_CDB_FIELD;
}
size = (cdb[10] << 24) | (cdb[11] << 16) |
(cdb[12] << 8) | cdb[13];
@ -462,7 +468,7 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
case SYNCHRONIZE_CACHE:
case SYNCHRONIZE_CACHE_16:
if (!ops->execute_sync_cache)
goto out_unsupported_cdb;
return TCM_UNSUPPORTED_SCSI_OPCODE;
/*
* Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
@ -483,42 +489,42 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
*/
if (cmd->t_task_lba || sectors) {
if (sbc_check_valid_sectors(cmd) < 0)
goto out_invalid_cdb_field;
return TCM_INVALID_CDB_FIELD;
}
cmd->execute_cmd = ops->execute_sync_cache;
break;
case UNMAP:
if (!ops->execute_unmap)
goto out_unsupported_cdb;
return TCM_UNSUPPORTED_SCSI_OPCODE;
size = get_unaligned_be16(&cdb[7]);
cmd->execute_cmd = ops->execute_unmap;
break;
case WRITE_SAME_16:
if (!ops->execute_write_same)
goto out_unsupported_cdb;
return TCM_UNSUPPORTED_SCSI_OPCODE;
sectors = transport_get_sectors_16(cdb);
if (!sectors) {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
goto out_invalid_cdb_field;
return TCM_INVALID_CDB_FIELD;
}
size = sbc_get_size(cmd, 1);
cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
if (sbc_write_same_supported(dev, &cdb[1]) < 0)
goto out_unsupported_cdb;
return TCM_UNSUPPORTED_SCSI_OPCODE;
cmd->execute_cmd = ops->execute_write_same;
break;
case WRITE_SAME:
if (!ops->execute_write_same)
goto out_unsupported_cdb;
return TCM_UNSUPPORTED_SCSI_OPCODE;
sectors = transport_get_sectors_10(cdb);
if (!sectors) {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
goto out_invalid_cdb_field;
return TCM_INVALID_CDB_FIELD;
}
size = sbc_get_size(cmd, 1);
@ -529,7 +535,7 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
* of byte 1 bit 3 UNMAP instead of original reserved field
*/
if (sbc_write_same_supported(dev, &cdb[1]) < 0)
goto out_unsupported_cdb;
return TCM_UNSUPPORTED_SCSI_OPCODE;
cmd->execute_cmd = ops->execute_write_same;
break;
case VERIFY:
@ -556,7 +562,7 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
/* reject any command that we don't have a handler for */
if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
goto out_unsupported_cdb;
return TCM_UNSUPPORTED_SCSI_OPCODE;
if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
unsigned long long end_lba;
@ -566,14 +572,14 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
" big sectors %u exceeds fabric_max_sectors:"
" %u\n", cdb[0], sectors,
dev->dev_attrib.fabric_max_sectors);
goto out_invalid_cdb_field;
return TCM_INVALID_CDB_FIELD;
}
if (sectors > dev->dev_attrib.hw_max_sectors) {
printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
" big sectors %u exceeds backend hw_max_sectors:"
" %u\n", cdb[0], sectors,
dev->dev_attrib.hw_max_sectors);
goto out_invalid_cdb_field;
return TCM_INVALID_CDB_FIELD;
}
end_lba = dev->transport->get_blocks(dev) + 1;
@ -581,26 +587,13 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
pr_err("cmd exceeds last lba %llu "
"(lba %llu, sectors %u)\n",
end_lba, cmd->t_task_lba, sectors);
goto out_invalid_cdb_field;
return TCM_INVALID_CDB_FIELD;
}
size = sbc_get_size(cmd, sectors);
}
ret = target_cmd_size_check(cmd, size);
if (ret < 0)
return ret;
return 0;
out_unsupported_cdb:
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
return -EINVAL;
out_invalid_cdb_field:
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);

@ -69,7 +69,8 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
}
static int spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
static sense_reason_t
spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
{
struct se_lun *lun = cmd->se_lun;
struct se_device *dev = cmd->se_dev;
@ -108,7 +109,8 @@ static int spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
}
/* unit serial number */
static int spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
static sense_reason_t
spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
u16 len = 0;
@ -161,7 +163,8 @@ static void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
* Device identification VPD, for a complete list of
* DESIGNATOR TYPEs see spc4r17 Table 459.
*/
static int spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
static sense_reason_t
spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
struct se_lun *lun = cmd->se_lun;
@ -406,7 +409,8 @@ check_scsi_name:
}
/* Extended INQUIRY Data VPD Page */
static int spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
static sense_reason_t
spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
{
buf[3] = 0x3c;
/* Set HEADSUP, ORDSUP, SIMPSUP */
@ -419,7 +423,8 @@ static int spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
}
/* Block Limits VPD page */
static int spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
static sense_reason_t
spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
u32 max_sectors;
@ -490,7 +495,8 @@ static int spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
}
/* Block Device Characteristics VPD page */
static int spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
static sense_reason_t
spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
@ -502,7 +508,8 @@ static int spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
}
/* Thin Provisioning VPD */
static int spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
static sense_reason_t
spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
@ -552,11 +559,12 @@ static int spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
static struct {
uint8_t page;
int (*emulate)(struct se_cmd *, unsigned char *);
sense_reason_t (*emulate)(struct se_cmd *, unsigned char *);
} evpd_handlers[] = {
{ .page = 0x00, .emulate = spc_emulate_evpd_00 },
{ .page = 0x80, .emulate = spc_emulate_evpd_80 },
@ -568,7 +576,8 @@ static struct {
};
/* supported vital product data pages */
static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
{
int p;
@ -586,14 +595,16 @@ static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
static int spc_emulate_inquiry(struct se_cmd *cmd)
static sense_reason_t
spc_emulate_inquiry(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
unsigned char *rbuf;
unsigned char *cdb = cmd->t_task_cdb;
unsigned char buf[SE_INQUIRY_BUF];
int p, ret;
sense_reason_t ret;
int p;
memset(buf, 0, SE_INQUIRY_BUF);
@ -606,8 +617,7 @@ static int spc_emulate_inquiry(struct se_cmd *cmd)
if (cdb[2]) {
pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
cdb[2]);
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
ret = -EINVAL;
ret = TCM_INVALID_CDB_FIELD;
goto out;
}
@ -624,15 +634,15 @@ static int spc_emulate_inquiry(struct se_cmd *cmd)
}
pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
ret = -EINVAL;
ret = TCM_INVALID_CDB_FIELD;
out:
rbuf = transport_kmap_data_sg(cmd);
if (rbuf) {
if (!rbuf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
transport_kunmap_data_sg(cmd);
}
if (!ret)
target_complete_cmd(cmd, GOOD);
@ -834,7 +844,7 @@ static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 bloc
return 17;
}
static int spc_emulate_modesense(struct se_cmd *cmd)
static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
char *cdb = cmd->t_task_cdb;
@ -851,7 +861,8 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
int i;
map_buf = transport_kmap_data_sg(cmd);
if (!map_buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/*
* If SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is not set, then we
* know we actually allocated a full page. Otherwise, if the
@ -864,8 +875,7 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
buf = kzalloc(SE_MODE_PAGE_BUF, GFP_KERNEL);
if (!buf) {
transport_kunmap_data_sg(cmd);
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOMEM;
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
} else {
buf = map_buf;
@ -920,9 +930,10 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
if (page == 0x3f) {
if (subpage != 0x00 && subpage != 0xff) {
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
length = -EINVAL;
goto out;
pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
kfree(buf);
transport_kunmap_data_sg(cmd);
return TCM_INVALID_CDB_FIELD;
}
for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) {
@ -958,8 +969,8 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
page, subpage);
cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
return -EINVAL;
transport_kunmap_data_sg(cmd);
return TCM_UNKNOWN_MODE_PAGE;
set_length:
if (ten)
@ -967,7 +978,6 @@ set_length:
else
buf[0] = length - 1;
out:
if (buf != map_buf) {
memcpy(map_buf, buf, cmd->data_length);
kfree(buf);
@ -978,7 +988,7 @@ out:
return 0;
}
static int spc_emulate_modeselect(struct se_cmd *cmd)
static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
char *cdb = cmd->t_task_cdb;
@ -993,10 +1003,11 @@ static int spc_emulate_modeselect(struct se_cmd *cmd)
int i;
buf = transport_kmap_data_sg(cmd);
if (!buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
if (!pf) {
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
ret = -EINVAL;
ret = TCM_INVALID_CDB_FIELD;
goto out;
}
@ -1011,15 +1022,12 @@ static int spc_emulate_modeselect(struct se_cmd *cmd)
goto check_contents;
}
cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
ret = -EINVAL;
ret = TCM_UNKNOWN_MODE_PAGE;
goto out;
check_contents:
if (memcmp(buf + off, tbuf, length)) {
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
ret = -EINVAL;
}
if (memcmp(buf + off, tbuf, length))
ret = TCM_INVALID_PARAMETER_LIST;
out:
transport_kunmap_data_sg(cmd);
@ -1029,7 +1037,7 @@ out:
return ret;
}
static int spc_emulate_request_sense(struct se_cmd *cmd)
static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
{
unsigned char *cdb = cmd->t_task_cdb;
unsigned char *rbuf;
@ -1041,19 +1049,14 @@ static int spc_emulate_request_sense(struct se_cmd *cmd)
if (cdb[1] & 0x01) {
pr_err("REQUEST_SENSE description emulation not"
" supported\n");
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -ENOSYS;
return TCM_INVALID_CDB_FIELD;
}
rbuf = transport_kmap_data_sg(cmd);
if (cmd->scsi_sense_reason != 0) {
/*
* Out of memory. We will fail with CHECK CONDITION, so
* we must not clear the unit attention condition.
*/
target_complete_cmd(cmd, CHECK_CONDITION);
return 0;
} else if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
if (!rbuf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
/*
* CURRENT ERROR, UNIT ATTENTION
*/
@ -1080,16 +1083,14 @@ static int spc_emulate_request_sense(struct se_cmd *cmd)
buf[7] = 0x0A;
}
if (rbuf) {
memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
transport_kunmap_data_sg(cmd);
}
target_complete_cmd(cmd, GOOD);
return 0;
}
int spc_emulate_report_luns(struct se_cmd *cmd)
sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
{
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
@ -1099,13 +1100,12 @@ int spc_emulate_report_luns(struct se_cmd *cmd)
if (cmd->data_length < 16) {
pr_warn("REPORT LUNS allocation length %u too small\n",
cmd->data_length);
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
return TCM_INVALID_CDB_FIELD;
}
buf = transport_kmap_data_sg(cmd);
if (!buf)
return -ENOMEM;
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/*
* If no struct se_session pointer is present, this struct se_cmd is
@ -1153,13 +1153,15 @@ done:
}
EXPORT_SYMBOL(spc_emulate_report_luns);
static int spc_emulate_testunitready(struct se_cmd *cmd)
static sense_reason_t
spc_emulate_testunitready(struct se_cmd *cmd)
{
target_complete_cmd(cmd, GOOD);
return 0;
}
int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
sense_reason_t
spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
{
struct se_device *dev = cmd->se_dev;
unsigned char *cdb = cmd->t_task_cdb;
@ -1300,9 +1302,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
" 0x%02x, sending CHECK_CONDITION.\n",
cmd->se_tfo->get_fabric_name(), cdb[0]);
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
return -EINVAL;
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
return 0;

@ -557,7 +557,8 @@ static void target_complete_failure_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
transport_generic_request_failure(cmd);
transport_generic_request_failure(cmd,
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
}
/*
@ -625,7 +626,6 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
complete(&cmd->t_transport_stop_comp);
return;
} else if (cmd->transport_state & CMD_T_FAILED) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
INIT_WORK(&cmd->work, target_complete_failure_work);
} else {
INIT_WORK(&cmd->work, target_complete_ok_work);
@ -976,7 +976,8 @@ transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
}
EXPORT_SYMBOL(transport_set_vpd_ident);
int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
struct se_device *dev = cmd->se_dev;
@ -991,7 +992,7 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
if (cmd->data_direction == DMA_TO_DEVICE) {
pr_err("Rejecting underflow/overflow"
" WRITE data\n");
goto out_invalid_cdb_field;
return TCM_INVALID_CDB_FIELD;
}
/*
* Reject READ_* or WRITE_* with overflow/underflow for
@ -1002,7 +1003,7 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
" CDB on non 512-byte sector setup subsystem"
" plugin: %s\n", dev->transport->name);
/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
goto out_invalid_cdb_field;
return TCM_INVALID_CDB_FIELD;
}
/*
* For the overflow case keep the existing fabric provided
@ -1022,10 +1023,6 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
return 0;
out_invalid_cdb_field:
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
/*
@ -1065,7 +1062,8 @@ void transport_init_se_cmd(
}
EXPORT_SYMBOL(transport_init_se_cmd);
static int transport_check_alloc_task_attr(struct se_cmd *cmd)
static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@ -1079,7 +1077,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
if (cmd->sam_task_attr == MSG_ACA_TAG) {
pr_debug("SAM Task Attribute ACA"
" emulation is not supported\n");
return -EINVAL;
return TCM_INVALID_CDB_FIELD;
}
/*
* Used to determine when ORDERED commands should go from
@ -1093,17 +1091,12 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
return 0;
}
/* target_setup_cmd_from_cdb():
*
* Called from fabric RX Thread.
*/
int target_setup_cmd_from_cdb(
struct se_cmd *cmd,
unsigned char *cdb)
sense_reason_t
target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
{
struct se_device *dev = cmd->se_dev;
unsigned long flags;
int ret;
sense_reason_t ret;
/*
* Ensure that the received CDB is less than the max (252 + 8) bytes
@ -1113,9 +1106,7 @@ int target_setup_cmd_from_cdb(
pr_err("Received SCSI CDB with command_size: %d that"
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
return TCM_INVALID_CDB_FIELD;
}
/*
* If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
@ -1130,10 +1121,7 @@ int target_setup_cmd_from_cdb(
" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
scsi_command_size(cdb),
(unsigned long)sizeof(cmd->__t_task_cdb));
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason =
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOMEM;
return TCM_OUT_OF_RESOURCES;
}
} else
cmd->t_task_cdb = &cmd->__t_task_cdb[0];
@ -1145,50 +1133,30 @@ int target_setup_cmd_from_cdb(
/*
* Check for an existing UNIT ATTENTION condition
*/
if (core_scsi3_ua_check(cmd, cdb) < 0) {
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
return -EINVAL;
}
ret = target_scsi3_ua_check(cmd);
if (ret)
return ret;
ret = target_alua_state_check(cmd);
if (ret) {
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
if (ret > 0)
cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
else
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
/*
* Check status for SPC-3 Persistent Reservations
*/
ret = target_check_reservation(cmd);
if (ret) {
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
if (ret)
return ret;
ret = target_check_reservation(cmd);
if (ret)
return ret;
}
ret = dev->transport->parse_cdb(cmd);
if (ret < 0)
if (ret)
return ret;
ret = transport_check_alloc_task_attr(cmd);
if (ret)
return ret;
spin_lock_irqsave(&cmd->t_state_lock, flags);
cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
/*
* Check for SAM Task Attribute Emulation
*/
if (transport_check_alloc_task_attr(cmd) < 0) {
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
spin_lock(&cmd->se_lun->lun_sep_lock);
if (cmd->se_lun->lun_sep)
cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
@ -1204,7 +1172,7 @@ EXPORT_SYMBOL(target_setup_cmd_from_cdb);
int transport_handle_cdb_direct(
struct se_cmd *cmd)
{
int ret;
sense_reason_t ret;
if (!cmd->se_lun) {
dump_stack();
@ -1234,13 +1202,41 @@ int transport_handle_cdb_direct(
* and call transport_generic_request_failure() if necessary..
*/
ret = transport_generic_new_cmd(cmd);
if (ret < 0)
transport_generic_request_failure(cmd);
if (ret)
transport_generic_request_failure(cmd, ret);
return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);
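For fabric drivers the net effect of the new convention is that every step of the submission path hands back a sense_reason_t, which the fabric feeds straight into transport_send_check_condition_and_sense() or transport_generic_request_failure(). A minimal sketch, not part of this commit; example_fabric_submit() and its arguments are hypothetical, only the target core calls are real:

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

static void example_fabric_submit(struct se_cmd *se_cmd, u32 unpacked_lun,
				  unsigned char *cdb)
{
	sense_reason_t ret;

	/* transport_init_se_cmd()/target_get_sess_cmd() setup omitted */

	ret = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
	if (ret) {
		/* no LUN attached yet, so build the CHECK CONDITION directly */
		transport_send_check_condition_and_sense(se_cmd, ret, 0);
		return;
	}

	ret = target_setup_cmd_from_cdb(se_cmd, cdb);
	if (ret) {
		/* handles sense data and reservation conflicts */
		transport_generic_request_failure(se_cmd, ret);
		return;
	}

	transport_handle_cdb_direct(se_cmd);
}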
static sense_reason_t
transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
if (!sgl || !sgl_count)
return 0;
/*
* Reject SCSI data overflow with map_mem_to_cmd() as incoming
* scatterlists already have been set to follow what the fabric
* passes for the original expected data transfer length.
*/
if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
pr_warn("Rejecting SCSI DATA overflow for fabric using"
" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
return TCM_INVALID_CDB_FIELD;
}
cmd->t_data_sg = sgl;
cmd->t_data_nents = sgl_count;
if (sgl_bidi && sgl_bidi_count) {
cmd->t_bidi_data_sg = sgl_bidi;
cmd->t_bidi_data_nents = sgl_bidi_count;
}
cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
return 0;
}
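With the exported mapper removed further down, fabrics that own the data buffers hand their scatterlists to target_submit_cmd_map_sgls(), and the static helper above attaches them under SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC. A rough sketch of such a caller; the wrapper name is invented and the meaning of the middle arguments (task attribute, data direction, flags) is an assumption based on the prototype in target_core_fabric.h:

#include <linux/scatterlist.h>
#include <linux/dma-direction.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

static int example_submit_with_sgls(struct se_cmd *se_cmd,
				    struct se_session *se_sess,
				    unsigned char *cdb, unsigned char *sense,
				    u32 unpacked_lun, u32 data_length,
				    struct scatterlist *sgl, u32 sgl_count)
{
	/*
	 * Pass the fabric's pre-allocated SGLs so transport_generic_new_cmd()
	 * does not allocate pages for this command; no BIDI payload here.
	 */
	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
					  unpacked_lun, data_length,
					  MSG_SIMPLE_TAG, DMA_FROM_DEVICE,
					  0 /* flags */,
					  sgl, sgl_count, NULL, 0);
}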
/*
* target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
* se_cmd + use pre-allocated SGL memory.
@ -1273,7 +1269,8 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
struct se_portal_group *se_tpg;
int rc;
sense_reason_t rc;
int ret;
se_tpg = se_sess->se_tpg;
BUG_ON(!se_tpg);
@ -1294,9 +1291,9 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
* for fabrics using TARGET_SCF_ACK_KREF that expect a second
* kref_put() to happen during fabric packet acknowledgement.
*/
rc = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
if (rc)
return rc;
ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
if (ret)
return ret;
/*
* Signal bidirectional data payloads to target-core
*/
@ -1305,16 +1302,16 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
/*
* Locate se_lun pointer and attach it to struct se_cmd
*/
if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
transport_send_check_condition_and_sense(se_cmd,
se_cmd->scsi_sense_reason, 0);
rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
if (rc) {
transport_send_check_condition_and_sense(se_cmd, rc, 0);
target_put_sess_cmd(se_sess, se_cmd);
return 0;
}
rc = target_setup_cmd_from_cdb(se_cmd, cdb);
if (rc != 0) {
transport_generic_request_failure(se_cmd);
transport_generic_request_failure(se_cmd, rc);
return 0;
}
/*
@ -1349,7 +1346,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
sgl_bidi, sgl_bidi_count);
if (rc != 0) {
transport_generic_request_failure(se_cmd);
transport_generic_request_failure(se_cmd, rc);
return 0;
}
}
@ -1495,16 +1492,17 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
/*
* Handle SAM-esque emulation for generic transport request failures.
*/
void transport_generic_request_failure(struct se_cmd *cmd)
void transport_generic_request_failure(struct se_cmd *cmd,
sense_reason_t sense_reason)
{
int ret = 0;
pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
cmd->t_task_cdb[0]);
pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
cmd->se_tfo->get_cmd_state(cmd),
cmd->t_state, cmd->scsi_sense_reason);
cmd->t_state, sense_reason);
pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
(cmd->transport_state & CMD_T_ACTIVE) != 0,
(cmd->transport_state & CMD_T_STOP) != 0,
@ -1515,7 +1513,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
*/
transport_complete_task_attr(cmd);
switch (cmd->scsi_sense_reason) {
switch (sense_reason) {
case TCM_NON_EXISTENT_LUN:
case TCM_UNSUPPORTED_SCSI_OPCODE:
case TCM_INVALID_CDB_FIELD:
@ -1528,6 +1526,9 @@ void transport_generic_request_failure(struct se_cmd *cmd)
case TCM_CHECK_CONDITION_UNIT_ATTENTION:
case TCM_CHECK_CONDITION_NOT_READY:
break;
case TCM_OUT_OF_RESOURCES:
sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
break;
case TCM_RESERVATION_CONFLICT:
/*
* No SENSE Data payload for this case, set SCSI Status
@ -1555,13 +1556,12 @@ void transport_generic_request_failure(struct se_cmd *cmd)
goto check_stop;
default:
pr_err("Unknown transport error for CDB 0x%02x: %d\n",
cmd->t_task_cdb[0], cmd->scsi_sense_reason);
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
cmd->t_task_cdb[0], sense_reason);
sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
break;
}
ret = transport_send_check_condition_and_sense(cmd,
cmd->scsi_sense_reason, 0);
ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
@ -1579,21 +1579,21 @@ EXPORT_SYMBOL(transport_generic_request_failure);
static void __target_execute_cmd(struct se_cmd *cmd)
{
int error = 0;
sense_reason_t ret;
spin_lock_irq(&cmd->t_state_lock);
cmd->transport_state |= (CMD_T_BUSY|CMD_T_SENT);
spin_unlock_irq(&cmd->t_state_lock);
if (cmd->execute_cmd)
error = cmd->execute_cmd(cmd);
if (error) {
if (cmd->execute_cmd) {
ret = cmd->execute_cmd(cmd);
if (ret) {
spin_lock_irq(&cmd->t_state_lock);
cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
spin_unlock_irq(&cmd->t_state_lock);
transport_generic_request_failure(cmd);
transport_generic_request_failure(cmd, ret);
}
}
}
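Backend drivers report failures from their execute hook the same way: ->execute_cmd() returns a sense_reason_t, and the core above turns a non-zero value into a CHECK CONDITION via transport_generic_request_failure(). A sketch of what such a hook can look like; the read-only toy backend and its name are invented, only target_complete_cmd() and the TCM_* codes are from the real API:

#include <linux/dma-direction.h>
#include <scsi/scsi.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>

static sense_reason_t example_execute_rw(struct se_cmd *cmd)
{
	/* this toy backend is read-only, so reject any write */
	if (cmd->data_direction == DMA_TO_DEVICE)
		return TCM_WRITE_PROTECTED;

	/* ... real data transfer would happen here ... */

	/* complete the command back to the fabric with GOOD status */
	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}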
@ -1987,53 +1987,6 @@ out_busy:
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
/*
* transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
* allocating in the core.
* @cmd: Associated se_cmd descriptor
* @mem: SGL style memory for TCM WRITE / READ
* @sg_mem_num: Number of SGL elements
* @mem_bidi_in: SGL style memory for TCM BIDI READ
* @sg_mem_bidi_num: Number of BIDI READ SGL elements
*
* Return: nonzero if the cmd was rejected for -ENOMEM or improper usage
* of parameters.
*/
int transport_generic_map_mem_to_cmd(
struct se_cmd *cmd,
struct scatterlist *sgl,
u32 sgl_count,
struct scatterlist *sgl_bidi,
u32 sgl_bidi_count)
{
if (!sgl || !sgl_count)
return 0;
/*
* Reject SCSI data overflow with map_mem_to_cmd() as incoming
* scatterlists already have been set to follow what the fabric
* passes for the original expected data transfer length.
*/
if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
pr_warn("Rejecting SCSI DATA overflow for fabric using"
" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
cmd->t_data_sg = sgl;
cmd->t_data_nents = sgl_count;
if (sgl_bidi && sgl_bidi_count) {
cmd->t_bidi_data_sg = sgl_bidi;
cmd->t_bidi_data_nents = sgl_bidi_count;
}
cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
return 0;
}
EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
void *transport_kmap_data_sg(struct se_cmd *cmd)
{
struct scatterlist *sg = cmd->t_data_sg;
@ -2054,10 +2007,8 @@ void *transport_kmap_data_sg(struct se_cmd *cmd)
/* >1 page. use vmap */
pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
if (!pages) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
if (!pages)
return NULL;
}
/* convert sg[] to pages[] */
for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
@ -2066,10 +2017,8 @@ void *transport_kmap_data_sg(struct se_cmd *cmd)
cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
kfree(pages);
if (!cmd->t_data_vmap) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
if (!cmd->t_data_vmap)
return NULL;
}
return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
}
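Since transport_kmap_data_sg() no longer records a reason in the command on allocation failure, its callers now supply one themselves. A sketch of the resulting caller pattern, loosely modelled on the SPC emulation code; the opcode handler name is invented:

#include <linux/string.h>
#include <scsi/scsi.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

static sense_reason_t example_emulate_opcode(struct se_cmd *cmd)
{
	unsigned char *buf;

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/* fill in the payload; this toy handler just zeroes it */
	memset(buf, 0, cmd->data_length);

	transport_kunmap_data_sg(cmd);
	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}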
@ -2135,7 +2084,8 @@ out:
* might not have the payload yet, so notify the fabric via a call to
* ->write_pending instead. Otherwise place it on the execution queue.
*/
int transport_generic_new_cmd(struct se_cmd *cmd)
sense_reason_t
transport_generic_new_cmd(struct se_cmd *cmd)
{
int ret = 0;
@ -2148,7 +2098,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
cmd->data_length) {
ret = transport_generic_get_mem(cmd);
if (ret < 0)
goto out_fail;
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
atomic_inc(&cmd->t_fe_count);
@ -2174,14 +2124,11 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
if (ret < 0)
return ret;
return 1;
/* fabric drivers should only return -EAGAIN or -ENOMEM as error */
WARN_ON(ret);
return 0;
out_fail:
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -EINVAL;
queue_full:
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
@ -2625,10 +2572,9 @@ static int transport_get_sense_codes(
return 0;
}
int transport_send_check_condition_and_sense(
struct se_cmd *cmd,
u8 reason,
int from_transport)
int
transport_send_check_condition_and_sense(struct se_cmd *cmd,
sense_reason_t reason, int from_transport)
{
unsigned char *buffer = cmd->sense_buffer;
unsigned long flags;

View File

@ -38,9 +38,8 @@
#include "target_core_pr.h"
#include "target_core_ua.h"
int core_scsi3_ua_check(
struct se_cmd *cmd,
unsigned char *cdb)
sense_reason_t
target_scsi3_ua_check(struct se_cmd *cmd)
{
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
@ -71,16 +70,14 @@ int core_scsi3_ua_check(
* was received, then the device server shall process the command
* and either:
*/
switch (cdb[0]) {
switch (cmd->t_task_cdb[0]) {
case INQUIRY:
case REPORT_LUNS:
case REQUEST_SENSE:
return 0;
default:
return -EINVAL;
return TCM_CHECK_CONDITION_UNIT_ATTENTION;
}
return -EINVAL;
}

int core_scsi3_ua_allocate(

View File

@ -26,7 +26,7 @@
extern struct kmem_cache *se_ua_cache;
extern int core_scsi3_ua_check(struct se_cmd *, unsigned char *);
extern sense_reason_t target_scsi3_ua_check(struct se_cmd *);
extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8);
extern void core_scsi3_ua_release_all(struct se_dev_entry *);
extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);

View File

@ -541,10 +541,6 @@ static void tcm_vhost_submission_work(struct work_struct *work)
if (tv_cmd->tvc_sgl_count) {
sg_ptr = tv_cmd->tvc_sgl;
/*
* For BIDI commands, pass in the extra READ buffer
* to transport_generic_map_mem_to_cmd() below..
*/
/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
#if 0
if (se_cmd->se_cmd_flags & SCF_BIDI) {

View File

@ -31,17 +31,17 @@ struct se_subsystem_api {
struct scatterlist *,
unsigned char *);
int (*parse_cdb)(struct se_cmd *cmd);
sense_reason_t (*parse_cdb)(struct se_cmd *cmd);
u32 (*get_device_type)(struct se_device *);
sector_t (*get_blocks)(struct se_device *);
unsigned char *(*get_sense_buffer)(struct se_cmd *);
};
struct sbc_ops {
int (*execute_rw)(struct se_cmd *cmd);
int (*execute_sync_cache)(struct se_cmd *cmd);
int (*execute_write_same)(struct se_cmd *cmd);
int (*execute_unmap)(struct se_cmd *cmd);
sense_reason_t (*execute_rw)(struct se_cmd *cmd);
sense_reason_t (*execute_sync_cache)(struct se_cmd *cmd);
sense_reason_t (*execute_write_same)(struct se_cmd *cmd);
sense_reason_t (*execute_unmap)(struct se_cmd *cmd);
};
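A block backend wires these sbc_ops up from its parse_cdb hook, which now also returns a sense_reason_t. A rough sketch with an invented backend name; only sbc_parse_cdb(), the ops layout above, and the se_subsystem_api .parse_cdb slot are from the real API:

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

static sense_reason_t example_execute_rw(struct se_cmd *cmd);
static sense_reason_t example_execute_sync_cache(struct se_cmd *cmd);

static struct sbc_ops example_sbc_ops = {
	.execute_rw		= example_execute_rw,
	.execute_sync_cache	= example_execute_sync_cache,
};

/* plugged into struct se_subsystem_api .parse_cdb */
static sense_reason_t example_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &example_sbc_ops);
}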
int transport_subsystem_register(struct se_subsystem_api *);
@ -49,11 +49,11 @@ void transport_subsystem_release(struct se_subsystem_api *);
void target_complete_cmd(struct se_cmd *, u8);
int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
int spc_emulate_report_luns(struct se_cmd *cmd);
sense_reason_t spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd);
int spc_get_write_same_sectors(struct se_cmd *cmd);
int sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops);
sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops);
u32 sbc_get_device_rev(struct se_device *dev);
u32 sbc_get_device_type(struct se_device *dev);

View File

@ -144,8 +144,6 @@ enum se_cmd_flags_table {
SCF_EMULATED_TASK_SENSE = 0x00000004,
SCF_SCSI_DATA_CDB = 0x00000008,
SCF_SCSI_TMR_CDB = 0x00000010,
SCF_SCSI_CDB_EXCEPTION = 0x00000020,
SCF_SCSI_RESERVATION_CONFLICT = 0x00000040,
SCF_FUA = 0x00000080,
SCF_SE_LUN_CMD = 0x00000100,
SCF_BIDI = 0x00000400,
@ -167,27 +165,32 @@ enum transport_lunflags_table {
};
/*
* Used by transport_send_check_condition_and_sense() and se_cmd->scsi_sense_reason
* Used by transport_send_check_condition_and_sense()
* to signal which ASC/ASCQ sense payload should be built.
*/
typedef unsigned __bitwise__ sense_reason_t;
enum tcm_sense_reason_table {
TCM_NON_EXISTENT_LUN = 0x01,
TCM_UNSUPPORTED_SCSI_OPCODE = 0x02,
TCM_INCORRECT_AMOUNT_OF_DATA = 0x03,
TCM_UNEXPECTED_UNSOLICITED_DATA = 0x04,
TCM_SERVICE_CRC_ERROR = 0x05,
TCM_SNACK_REJECTED = 0x06,
TCM_SECTOR_COUNT_TOO_MANY = 0x07,
TCM_INVALID_CDB_FIELD = 0x08,
TCM_INVALID_PARAMETER_LIST = 0x09,
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE = 0x0a,
TCM_UNKNOWN_MODE_PAGE = 0x0b,
TCM_WRITE_PROTECTED = 0x0c,
TCM_CHECK_CONDITION_ABORT_CMD = 0x0d,
TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e,
TCM_CHECK_CONDITION_NOT_READY = 0x0f,
TCM_RESERVATION_CONFLICT = 0x10,
TCM_ADDRESS_OUT_OF_RANGE = 0x11,
#define R(x) (__force sense_reason_t )(x)
TCM_NON_EXISTENT_LUN = R(0x01),
TCM_UNSUPPORTED_SCSI_OPCODE = R(0x02),
TCM_INCORRECT_AMOUNT_OF_DATA = R(0x03),
TCM_UNEXPECTED_UNSOLICITED_DATA = R(0x04),
TCM_SERVICE_CRC_ERROR = R(0x05),
TCM_SNACK_REJECTED = R(0x06),
TCM_SECTOR_COUNT_TOO_MANY = R(0x07),
TCM_INVALID_CDB_FIELD = R(0x08),
TCM_INVALID_PARAMETER_LIST = R(0x09),
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE = R(0x0a),
TCM_UNKNOWN_MODE_PAGE = R(0x0b),
TCM_WRITE_PROTECTED = R(0x0c),
TCM_CHECK_CONDITION_ABORT_CMD = R(0x0d),
TCM_CHECK_CONDITION_UNIT_ATTENTION = R(0x0e),
TCM_CHECK_CONDITION_NOT_READY = R(0x0f),
TCM_RESERVATION_CONFLICT = R(0x10),
TCM_ADDRESS_OUT_OF_RANGE = R(0x11),
TCM_OUT_OF_RESOURCES = R(0x12),
#undef R
};
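The __bitwise__ typedef is what buys the extra checking: sparse (make C=1) treats sense_reason_t as a distinct base type, so mixing it with plain errno-style integers is flagged at build time. An illustrative fragment, not from the patch; the function name is invented:

#include <linux/errno.h>
#include <target/target_core_base.h>

static sense_reason_t example_mixup(void)
{
	sense_reason_t ret;

	ret = TCM_INVALID_CDB_FIELD;	/* fine: both sides are sense_reason_t */
	if (ret)			/* fine: testing zero/non-zero is allowed */
		return ret;

	/*
	 * sparse: warning: incorrect type in assignment (different base
	 * types) -- -EINVAL is a plain int, so the old errno/sense mix-ups
	 * are now caught instead of silently propagating.
	 */
	ret = -EINVAL;
	return ret;
}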
enum target_sc_flags_table {
@ -407,7 +410,6 @@ struct se_cmd {
u8 scsi_status;
u8 scsi_asc;
u8 scsi_ascq;
u8 scsi_sense_reason;
u16 scsi_sense_length;
/* Delay for ALUA Active/NonOptimized state access in milliseconds */
int alua_nonop_delay;
@ -445,7 +447,7 @@ struct se_cmd {
struct completion cmd_wait_comp;
struct kref cmd_kref;
struct target_core_fabric_ops *se_tfo;
int (*execute_cmd)(struct se_cmd *);
sense_reason_t (*execute_cmd)(struct se_cmd *);
void (*transport_complete_callback)(struct se_cmd *);
unsigned char *t_task_cdb;

View File

@ -98,8 +98,8 @@ void transport_deregister_session(struct se_session *);
void transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *,
struct se_session *, u32, int, int, unsigned char *);
int transport_lookup_cmd_lun(struct se_cmd *, u32);
int target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u32);
sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
int target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *,
unsigned char *, unsigned char *, u32, u32, int, int, int,
struct scatterlist *, u32, struct scatterlist *, u32);
@ -110,9 +110,7 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
void *fabric_tmr_ptr, unsigned char tm_type,
gfp_t, unsigned int, int);
int transport_handle_cdb_direct(struct se_cmd *);
int transport_generic_map_mem_to_cmd(struct se_cmd *cmd,
struct scatterlist *, u32, struct scatterlist *, u32);
int transport_generic_new_cmd(struct se_cmd *);
sense_reason_t transport_generic_new_cmd(struct se_cmd *);
void target_execute_cmd(struct se_cmd *cmd);
@ -120,7 +118,8 @@ void transport_generic_free_cmd(struct se_cmd *, int);
bool transport_wait_for_tasks(struct se_cmd *);
int transport_check_aborted_status(struct se_cmd *, int);
int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
int transport_send_check_condition_and_sense(struct se_cmd *,
sense_reason_t, int);
int target_put_sess_cmd(struct se_session *, struct se_cmd *);
void target_sess_cmd_list_set_waiting(struct se_session *);
@ -131,7 +130,7 @@ int core_alua_check_nonop_delay(struct se_cmd *);
int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
void core_tmr_release_req(struct se_tmr_req *);
int transport_generic_handle_tmr(struct se_cmd *);
void transport_generic_request_failure(struct se_cmd *);
void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
int transport_lookup_tmr_lun(struct se_cmd *, u32);
struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,