Merge remote-tracking branch 'scsi-queue/drivers-for-3.19' into for-linus

commit e617457691
@@ -2740,7 +2740,6 @@ static struct scsi_host_template srp_template = {
 	.info = srp_target_info,
 	.queuecommand = srp_queuecommand,
 	.change_queue_depth = srp_change_queue_depth,
-	.change_queue_type = scsi_change_queue_type,
 	.eh_abort_handler = srp_abort,
 	.eh_device_reset_handler = srp_reset_device,
 	.eh_host_reset_handler = srp_reset_host,
@@ -1708,17 +1708,17 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,

 	switch (srp_cmd->task_attr) {
 	case SRP_CMD_SIMPLE_Q:
-		cmd->sam_task_attr = MSG_SIMPLE_TAG;
+		cmd->sam_task_attr = TCM_SIMPLE_TAG;
 		break;
 	case SRP_CMD_ORDERED_Q:
 	default:
-		cmd->sam_task_attr = MSG_ORDERED_TAG;
+		cmd->sam_task_attr = TCM_ORDERED_TAG;
 		break;
 	case SRP_CMD_HEAD_OF_Q:
-		cmd->sam_task_attr = MSG_HEAD_TAG;
+		cmd->sam_task_attr = TCM_HEAD_TAG;
 		break;
 	case SRP_CMD_ACA:
-		cmd->sam_task_attr = MSG_ACA_TAG;
+		cmd->sam_task_attr = TCM_ACA_TAG;
 		break;
 	}

@@ -1733,7 +1733,7 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
 			sizeof(srp_cmd->lun));
 	rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
 			&send_ioctx->sense_data[0], unpacked_lun, data_len,
-			MSG_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
+			TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
 	if (rc != 0) {
 		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 		goto send_sense;
@@ -176,7 +176,6 @@ STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
 STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
 STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
 static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
-static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth);

 STATIC struct device_attribute *NCR_700_dev_attrs[];

@@ -326,7 +325,6 @@ NCR_700_detect(struct scsi_host_template *tpnt,
 	tpnt->slave_destroy = NCR_700_slave_destroy;
 	tpnt->slave_alloc = NCR_700_slave_alloc;
 	tpnt->change_queue_depth = NCR_700_change_queue_depth;
-	tpnt->change_queue_type = NCR_700_change_queue_type;
 	tpnt->use_blk_tags = 1;

 	if(tpnt->name == NULL)
@@ -904,8 +902,8 @@ process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata
 			hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));

 			SCp->device->tagged_supported = 0;
+			SCp->device->simple_tags = 0;
 			scsi_change_queue_depth(SCp->device, host->cmd_per_lun);
-			scsi_set_tag_type(SCp->device, 0);
 		} else {
 			shost_printk(KERN_WARNING, host,
 				"(%d:%d) Unexpected REJECT Message %s\n",
@@ -1818,8 +1816,8 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
 		hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
 	}

-	if((hostdata->tag_negotiated &(1<<scmd_id(SCp)))
-	   && scsi_get_tag_type(SCp->device)) {
+	if ((hostdata->tag_negotiated & (1<<scmd_id(SCp))) &&
+	    SCp->device->simple_tags) {
 		slot->tag = SCp->request->tag;
 		CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
 			slot->tag, slot);
@@ -2082,39 +2080,6 @@ NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
 	return scsi_change_queue_depth(SDp, depth);
 }

-static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
-{
-	int change_tag = ((tag_type ==0 && scsi_get_tag_type(SDp) != 0)
-			  || (tag_type != 0 && scsi_get_tag_type(SDp) == 0));
-	struct NCR_700_Host_Parameters *hostdata =
-		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
-
-	/* We have a global (per target) flag to track whether TCQ is
-	 * enabled, so we'll be turning it off for the entire target here.
-	 * our tag algorithm will fail if we mix tagged and untagged commands,
-	 * so quiesce the device before doing this */
-	if (change_tag)
-		scsi_target_quiesce(SDp->sdev_target);
-
-	scsi_set_tag_type(SDp, tag_type);
-	if (!tag_type) {
-		/* shift back to the default unqueued number of commands
-		 * (the user can still raise this) */
-		scsi_change_queue_depth(SDp, SDp->host->cmd_per_lun);
-		hostdata->tag_negotiated &= ~(1 << sdev_id(SDp));
-	} else {
-		/* Here, we cleared the negotiation flag above, so this
-		 * will force the driver to renegotiate */
-		scsi_change_queue_depth(SDp, SDp->queue_depth);
-		if (change_tag)
-			NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
-	}
-	if (change_tag)
-		scsi_target_resume(SDp->sdev_target);
-
-	return tag_type;
-}
-
 static ssize_t
 NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
 {
@@ -1462,18 +1462,17 @@ config SCSI_WD719X
	  SCSI controllers (based on WD33C296A chip).

 config SCSI_DEBUG
-	tristate "SCSI debugging host simulator"
+	tristate "SCSI debugging host and device simulator"
 	depends on SCSI
 	select CRC_T10DIF
 	help
-	  This is a host adapter simulator that can simulate multiple hosts
-	  each with multiple dummy SCSI devices (disks). It defaults to one
-	  host adapter with one dummy SCSI disk. Each dummy disk uses kernel
-	  RAM as storage (i.e. it is a ramdisk). To save space when multiple
-	  dummy disks are simulated, they share the same kernel RAM for
-	  their storage. See <http://sg.danny.cz/sg/sdebug26.html> for more
-	  information. This driver is primarily of use to those testing the
-	  SCSI and block subsystems. If unsure, say N.
+	  This pseudo driver simulates one or more hosts (SCSI initiators),
+	  each with one or more targets, each with one or more logical units.
+	  Defaults to one of each, creating a small RAM disk device. Many
+	  parameters found in the /sys/bus/pseudo/drivers/scsi_debug
+	  directory can be tweaked at run time.
+	  See <http://sg.danny.cz/sg/sdebug26.html> for more information.
+	  Mainly used for testing and best as a module. If unsure, say N.

 config SCSI_MESH
 	tristate "MESH (Power Mac internal SCSI) support"
@@ -7921,9 +7921,9 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
 	 */
 	if ((asc_dvc->cur_dvc_qng[scp->device->id] > 0) &&
 	    (boardp->reqcnt[scp->device->id] % 255) == 0) {
-		asc_scsi_q->q2.tag_code = MSG_ORDERED_TAG;
+		asc_scsi_q->q2.tag_code = ORDERED_QUEUE_TAG;
 	} else {
-		asc_scsi_q->q2.tag_code = MSG_SIMPLE_TAG;
+		asc_scsi_q->q2.tag_code = SIMPLE_QUEUE_TAG;
 	}

 	/* Build ASC_SCSI_Q */
@@ -8351,7 +8351,7 @@ static int AscPutReadyQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no)
 	}
 	q_addr = ASC_QNO_TO_QADDR(q_no);
 	if ((scsiq->q1.target_id & asc_dvc->use_tagged_qng) == 0) {
-		scsiq->q2.tag_code &= ~MSG_SIMPLE_TAG;
+		scsiq->q2.tag_code &= ~SIMPLE_QUEUE_TAG;
 	}
 	scsiq->q1.status = QS_FREE;
 	AscMemWordCopyPtrToLram(iop_base,
@@ -8669,7 +8669,7 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
 		}
 	}
 	if (disable_syn_offset_one_fix) {
-		scsiq->q2.tag_code &= ~MSG_SIMPLE_TAG;
+		scsiq->q2.tag_code &= ~SIMPLE_QUEUE_TAG;
 		scsiq->q2.tag_code |= (ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX |
 				       ASC_TAG_FLAG_DISABLE_DISCONNECT);
 	} else {
@@ -63,7 +63,6 @@ static struct scsi_host_template aic94xx_sht = {
 	.scan_finished = asd_scan_finished,
 	.scan_start = asd_scan_start,
 	.change_queue_depth = sas_change_queue_depth,
-	.change_queue_type = sas_change_queue_type,
 	.bios_param = sas_bios_param,
 	.can_queue = 1,
 	.cmd_per_lun = 1,
@@ -2792,7 +2792,6 @@ static struct scsi_host_template bnx2fc_shost_template = {
 	.eh_host_reset_handler = fc_eh_host_reset,
 	.slave_alloc = fc_slave_alloc,
 	.change_queue_depth = scsi_change_queue_depth,
-	.change_queue_type = scsi_change_queue_type,
 	.this_id = -1,
 	.cmd_per_lun = 3,
 	.use_clustering = ENABLE_CLUSTERING,
@@ -1737,11 +1737,7 @@ void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
 	fcp_cmnd->fc_pri_ta = 0;
 	fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
 	fcp_cmnd->fc_flags = io_req->io_req_flags;
-
-	if (sc_cmd->flags & SCMD_TAGGED)
-		fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
-	else
-		fcp_cmnd->fc_pri_ta = 0;
+	fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
 }

 static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
@@ -172,10 +172,7 @@ csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
 		fcp_cmnd->fc_cmdref = 0;

 		memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16);
-		if (scmnd->flags & SCMD_TAGGED)
-			fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
-		else
-			fcp_cmnd->fc_pri_ta = 0;
+		fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
 		fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd));

 		if (req->nsge)
@@ -684,9 +684,9 @@ static u16 calc_fi_checksum(struct esas2r_flash_context *fc)
  * 1) verify the fi_version is correct
  * 2) verify the checksum of the entire image.
  * 3) validate the adap_typ, action and length fields.
- * 4) valdiate each component header. check the img_type and
+ * 4) validate each component header. check the img_type and
  *    length fields
- * 5) valdiate each component image. validate signatures and
+ * 5) validate each component image. validate signatures and
  *    local checksums
  */
 static bool verify_fi(struct esas2r_adapter *a,
@@ -255,7 +255,6 @@ static struct scsi_host_template driver_template = {
 	.emulated = 0,
 	.proc_name = ESAS2R_DRVR_NAME,
 	.change_queue_depth = scsi_change_queue_depth,
-	.change_queue_type = scsi_change_queue_type,
 	.max_sectors = 0xFFFF,
 	.use_blk_tags = 1,
 };
@@ -281,7 +281,6 @@ static struct scsi_host_template fcoe_shost_template = {
 	.eh_host_reset_handler = fc_eh_host_reset,
 	.slave_alloc = fc_slave_alloc,
 	.change_queue_depth = scsi_change_queue_depth,
-	.change_queue_type = scsi_change_queue_type,
 	.this_id = -1,
 	.cmd_per_lun = 3,
 	.can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
@@ -111,7 +111,6 @@ static struct scsi_host_template fnic_host_template = {
 	.eh_host_reset_handler = fnic_host_reset,
 	.slave_alloc = fnic_slave_alloc,
 	.change_queue_depth = scsi_change_queue_depth,
-	.change_queue_type = scsi_change_queue_type,
 	.this_id = -1,
 	.cmd_per_lun = 3,
 	.can_queue = FNIC_DFLT_IO_REQ,
@@ -1615,7 +1615,6 @@ static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
 	struct ibmvfc_cmd *vfc_cmd;
 	struct ibmvfc_event *evt;
-	u8 tag[2];
 	int rc;

 	if (unlikely((rc = fc_remote_port_chkready(rport))) ||
@@ -3089,7 +3088,6 @@ static struct scsi_host_template driver_template = {
 	.target_alloc = ibmvfc_target_alloc,
 	.scan_finished = ibmvfc_scan_finished,
 	.change_queue_depth = ibmvfc_change_queue_depth,
-	.change_queue_type = scsi_change_queue_type,
 	.cmd_per_lun = 16,
 	.can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
 	.this_id = -1,
@@ -1426,16 +1426,14 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
 		if (res->sdev) {
 			res->del_from_ml = 1;
 			res->res_handle = IPR_INVALID_RES_HANDLE;
-			if (ioa_cfg->allow_ml_add_del)
-				schedule_work(&ioa_cfg->work_q);
+			schedule_work(&ioa_cfg->work_q);
 		} else {
 			ipr_clear_res_target(res);
 			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
 		}
 	} else if (!res->sdev || res->del_from_ml) {
 		res->add_to_ml = 1;
-		if (ioa_cfg->allow_ml_add_del)
-			schedule_work(&ioa_cfg->work_q);
+		schedule_work(&ioa_cfg->work_q);
 	}

 	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
@@ -3273,8 +3271,7 @@ static void ipr_worker_thread(struct work_struct *work)
 restart:
 	do {
 		did_work = 0;
-		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
-		    !ioa_cfg->allow_ml_add_del) {
+		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 			return;
 		}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ioa_cfg->scan_done = 1;
|
||||||
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
|
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
|
||||||
kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
|
kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
|
||||||
LEAVE;
|
LEAVE;
|
||||||
|
@@ -4345,30 +4343,6 @@ static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
 	return sdev->queue_depth;
 }

-/**
- * ipr_change_queue_type - Change the device's queue type
- * @dsev:		scsi device struct
- * @tag_type:	type of tags to use
- *
- * Return value:
- *	actual queue type set
- **/
-static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
-{
-	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
-	struct ipr_resource_entry *res;
-	unsigned long lock_flags = 0;
-
-	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-	res = (struct ipr_resource_entry *)sdev->hostdata;
-	if (res && ipr_is_gscsi(res))
-		tag_type = scsi_change_queue_type(sdev, tag_type);
-	else
-		tag_type = 0;
-	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-	return tag_type;
-}
-
 /**
  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
  * @dev:	device struct
|
||||||
sdev->no_uld_attach = 1;
|
sdev->no_uld_attach = 1;
|
||||||
}
|
}
|
||||||
if (ipr_is_vset_device(res)) {
|
if (ipr_is_vset_device(res)) {
|
||||||
|
sdev->scsi_level = SCSI_SPC_3;
|
||||||
blk_queue_rq_timeout(sdev->request_queue,
|
blk_queue_rq_timeout(sdev->request_queue,
|
||||||
IPR_VSET_RW_TIMEOUT);
|
IPR_VSET_RW_TIMEOUT);
|
||||||
blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
|
blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
|
||||||
|
@ -5231,6 +5206,28 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
|
||||||
* @scsi_cmd: scsi command struct
|
* @scsi_cmd: scsi command struct
|
||||||
*
|
*
|
||||||
* Return value:
|
* Return value:
|
||||||
|
* 0 if scan in progress / 1 if scan is complete
|
||||||
|
**/
|
||||||
|
static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
|
||||||
|
{
|
||||||
|
unsigned long lock_flags;
|
||||||
|
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
|
||||||
|
int rc = 0;
|
||||||
|
|
||||||
|
spin_lock_irqsave(shost->host_lock, lock_flags);
|
||||||
|
if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
|
||||||
|
rc = 1;
|
||||||
|
if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
|
||||||
|
rc = 1;
|
||||||
|
spin_unlock_irqrestore(shost->host_lock, lock_flags);
|
||||||
|
return rc;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ipr_eh_host_reset - Reset the host adapter
|
||||||
|
* @scsi_cmd: scsi command struct
|
||||||
|
*
|
||||||
|
* Return value:
|
||||||
* SUCCESS / FAILED
|
* SUCCESS / FAILED
|
||||||
**/
|
**/
|
||||||
static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
|
static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
|
||||||
|
@ -5779,7 +5776,7 @@ static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
|
||||||
|
|
||||||
ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
|
ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
|
||||||
|
|
||||||
if (!scsi_get_tag_type(scsi_cmd->device)) {
|
if (!scsi_cmd->device->simple_tags) {
|
||||||
ipr_erp_request_sense(ipr_cmd);
|
ipr_erp_request_sense(ipr_cmd);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@ -6299,10 +6296,10 @@ static struct scsi_host_template driver_template = {
|
||||||
.slave_alloc = ipr_slave_alloc,
|
.slave_alloc = ipr_slave_alloc,
|
||||||
.slave_configure = ipr_slave_configure,
|
.slave_configure = ipr_slave_configure,
|
||||||
.slave_destroy = ipr_slave_destroy,
|
.slave_destroy = ipr_slave_destroy,
|
||||||
|
.scan_finished = ipr_scan_finished,
|
||||||
.target_alloc = ipr_target_alloc,
|
.target_alloc = ipr_target_alloc,
|
||||||
.target_destroy = ipr_target_destroy,
|
.target_destroy = ipr_target_destroy,
|
||||||
.change_queue_depth = ipr_change_queue_depth,
|
.change_queue_depth = ipr_change_queue_depth,
|
||||||
.change_queue_type = ipr_change_queue_type,
|
|
||||||
.bios_param = ipr_biosparam,
|
.bios_param = ipr_biosparam,
|
||||||
.can_queue = IPR_MAX_COMMANDS,
|
.can_queue = IPR_MAX_COMMANDS,
|
||||||
.this_id = -1,
|
.this_id = -1,
|
||||||
|
@ -6841,7 +6838,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
|
||||||
ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
|
ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
|
||||||
|
|
||||||
list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
|
list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
|
||||||
if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
|
if (res->add_to_ml || res->del_from_ml) {
|
||||||
ipr_trace;
|
ipr_trace;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -6870,6 +6867,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
|
||||||
if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
|
if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
|
||||||
scsi_block_requests(ioa_cfg->host);
|
scsi_block_requests(ioa_cfg->host);
|
||||||
|
|
||||||
|
schedule_work(&ioa_cfg->work_q);
|
||||||
LEAVE;
|
LEAVE;
|
||||||
return IPR_RC_JOB_RETURN;
|
return IPR_RC_JOB_RETURN;
|
||||||
}
|
}
|
||||||
|
@ -7610,6 +7608,19 @@ static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
|
||||||
type[4] = '\0';
|
type[4] = '\0';
|
||||||
ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
|
ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
|
||||||
|
|
||||||
|
if (ipr_invalid_adapter(ioa_cfg)) {
|
||||||
|
dev_err(&ioa_cfg->pdev->dev,
|
||||||
|
"Adapter not supported in this hardware configuration.\n");
|
||||||
|
|
||||||
|
if (!ipr_testmode) {
|
||||||
|
ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
|
||||||
|
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
|
||||||
|
list_add_tail(&ipr_cmd->queue,
|
||||||
|
&ioa_cfg->hrrq->hrrq_free_q);
|
||||||
|
return IPR_RC_JOB_RETURN;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
|
ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
|
||||||
|
|
||||||
ipr_ioafp_inquiry(ipr_cmd, 1, 0,
|
ipr_ioafp_inquiry(ipr_cmd, 1, 0,
|
||||||
|
@@ -8797,20 +8808,6 @@ static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
 				IPR_SHUTDOWN_NONE);
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
-	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
-	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
-
-	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
-		rc = -EIO;
-	} else if (ipr_invalid_adapter(ioa_cfg)) {
-		if (!ipr_testmode)
-			rc = -EIO;
-
-		dev_err(&ioa_cfg->pdev->dev,
-			"Adapter not supported in this hardware configuration.\n");
-	}
-
-	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

 	LEAVE;
 	return rc;
|
||||||
* ioa_cfg->max_devs_supported)));
|
* ioa_cfg->max_devs_supported)));
|
||||||
}
|
}
|
||||||
|
|
||||||
host->max_channel = IPR_MAX_BUS_TO_SCAN;
|
host->max_channel = IPR_VSET_BUS;
|
||||||
host->unique_id = host->host_no;
|
host->unique_id = host->host_no;
|
||||||
host->max_cmd_len = IPR_MAX_CDB_LEN;
|
host->max_cmd_len = IPR_MAX_CDB_LEN;
|
||||||
host->can_queue = ioa_cfg->max_cmds;
|
host->can_queue = ioa_cfg->max_cmds;
|
||||||
|
@@ -9763,25 +9760,6 @@ out_scsi_host_put:
 	goto out;
 }

-/**
- * ipr_scan_vsets - Scans for VSET devices
- * @ioa_cfg:	ioa config struct
- *
- * Description: Since the VSET resources do not follow SAM in that we can have
- * sparse LUNs with no LUN 0, we have to scan for these ourselves.
- *
- * Return value:
- *	none
- **/
-static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
-{
-	int target, lun;
-
-	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
-		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
-			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
-}
-
 /**
  * ipr_initiate_ioa_bringdown - Bring down an adapter
  * @ioa_cfg:		ioa config struct
|
||||||
}
|
}
|
||||||
|
|
||||||
scsi_scan_host(ioa_cfg->host);
|
scsi_scan_host(ioa_cfg->host);
|
||||||
ipr_scan_vsets(ioa_cfg);
|
|
||||||
scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
|
|
||||||
ioa_cfg->allow_ml_add_del = 1;
|
|
||||||
ioa_cfg->host->max_channel = IPR_VSET_BUS;
|
|
||||||
ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
|
ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
|
||||||
|
|
||||||
if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
|
if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
|
||||||
|
|
|
@@ -157,13 +157,11 @@

 #define IPR_MAX_NUM_TARGETS_PER_BUS 256
 #define IPR_MAX_NUM_LUNS_PER_TARGET 256
-#define IPR_MAX_NUM_VSET_LUNS_PER_TARGET 8
 #define IPR_VSET_BUS 0xff
 #define IPR_IOA_BUS 0xff
 #define IPR_IOA_TARGET 0xff
 #define IPR_IOA_LUN 0xff
 #define IPR_MAX_NUM_BUSES 16
-#define IPR_MAX_BUS_TO_SCAN IPR_MAX_NUM_BUSES

 #define IPR_NUM_RESET_RELOAD_RETRIES 3

|
||||||
u8 in_ioa_bringdown:1;
|
u8 in_ioa_bringdown:1;
|
||||||
u8 ioa_unit_checked:1;
|
u8 ioa_unit_checked:1;
|
||||||
u8 dump_taken:1;
|
u8 dump_taken:1;
|
||||||
u8 allow_ml_add_del:1;
|
u8 scan_done:1;
|
||||||
u8 needs_hard_reset:1;
|
u8 needs_hard_reset:1;
|
||||||
u8 dual_raid:1;
|
u8 dual_raid:1;
|
||||||
u8 needs_warm_reset:1;
|
u8 needs_warm_reset:1;
|
||||||
|
|
|
@@ -158,7 +158,6 @@ static struct scsi_host_template isci_sht = {
 	.scan_finished = isci_host_scan_finished,
 	.scan_start = isci_host_start,
 	.change_queue_depth = sas_change_queue_depth,
-	.change_queue_type = sas_change_queue_type,
 	.bios_param = sas_bios_param,
 	.can_queue = ISCI_CAN_QUEUE_VAL,
 	.cmd_per_lun = 1,
@@ -906,13 +906,6 @@ int sas_change_queue_depth(struct scsi_device *sdev, int depth)
 	return scsi_change_queue_depth(sdev, depth);
 }

-int sas_change_queue_type(struct scsi_device *scsi_dev, int type)
-{
-	if (dev_is_sata(sdev_to_domain_dev(scsi_dev)))
-		return -EINVAL;
-	return scsi_change_queue_type(scsi_dev, type);
-}
-
 int sas_bios_param(struct scsi_device *scsi_dev,
 			  struct block_device *bdev,
 			  sector_t capacity, int *hsc)
@@ -1011,7 +1004,6 @@ EXPORT_SYMBOL_GPL(sas_queuecommand);
 EXPORT_SYMBOL_GPL(sas_target_alloc);
 EXPORT_SYMBOL_GPL(sas_slave_configure);
 EXPORT_SYMBOL_GPL(sas_change_queue_depth);
-EXPORT_SYMBOL_GPL(sas_change_queue_type);
 EXPORT_SYMBOL_GPL(sas_bios_param);
 EXPORT_SYMBOL_GPL(sas_task_abort);
 EXPORT_SYMBOL_GPL(sas_phy_reset);
@@ -5879,7 +5879,6 @@ struct scsi_host_template lpfc_template = {
 	.max_sectors = 0xFFFF,
 	.vendor_id = LPFC_NL_VENDOR_ID,
 	.change_queue_depth = scsi_change_queue_depth,
-	.change_queue_type = scsi_change_queue_type,
 	.use_blk_tags = 1,
 	.track_queue_depth = 1,
 };
@@ -5904,7 +5903,6 @@ struct scsi_host_template lpfc_vport_template = {
 	.shost_attrs = lpfc_vport_attrs,
 	.max_sectors = 0xFFFF,
 	.change_queue_depth = scsi_change_queue_depth,
-	.change_queue_type = scsi_change_queue_type,
 	.use_blk_tags = 1,
 	.track_queue_depth = 1,
 };
@@ -7592,7 +7592,6 @@ static struct scsi_host_template scsih_driver_template = {
 	.scan_finished = _scsih_scan_finished,
 	.scan_start = _scsih_scan_start,
 	.change_queue_depth = _scsih_change_queue_depth,
-	.change_queue_type = scsi_change_queue_type,
 	.eh_abort_handler = _scsih_abort,
 	.eh_device_reset_handler = _scsih_dev_reset,
 	.eh_target_reset_handler = _scsih_target_reset,
@@ -1006,12 +1006,9 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
 		    &mpt2sas_phy->remote_identify);
 		_transport_add_phy_to_an_existing_port(ioc, sas_node,
 		    mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address);
-	} else {
+	} else
 		memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct
 		    sas_identify));
-		_transport_del_phy_from_an_existing_port(ioc, sas_node,
-		    mpt2sas_phy);
-	}

 	if (mpt2sas_phy->phy)
 		mpt2sas_phy->phy->negotiated_linkrate =
@@ -7229,7 +7229,6 @@ static struct scsi_host_template scsih_driver_template = {
 	.scan_finished = _scsih_scan_finished,
 	.scan_start = _scsih_scan_start,
 	.change_queue_depth = _scsih_change_queue_depth,
-	.change_queue_type = scsi_change_queue_type,
 	.eh_abort_handler = _scsih_abort,
 	.eh_device_reset_handler = _scsih_dev_reset,
 	.eh_target_reset_handler = _scsih_target_reset,
@@ -1003,12 +1003,9 @@ mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
 		    &mpt3sas_phy->remote_identify);
 		_transport_add_phy_to_an_existing_port(ioc, sas_node,
 		    mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address);
-	} else {
+	} else
 		memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct
 		    sas_identify));
-		_transport_del_phy_from_an_existing_port(ioc, sas_node,
-		    mpt3sas_phy);
-	}

 	if (mpt3sas_phy->phy)
 		mpt3sas_phy->phy->negotiated_linkrate =
@@ -54,7 +54,6 @@ static struct scsi_host_template mvs_sht = {
 	.scan_finished = mvs_scan_finished,
 	.scan_start = mvs_scan_start,
 	.change_queue_depth = sas_change_queue_depth,
-	.change_queue_type = sas_change_queue_type,
 	.bios_param = sas_bios_param,
 	.can_queue = 1,
 	.cmd_per_lun = 1,
@@ -76,7 +76,6 @@ static struct scsi_host_template pm8001_sht = {
 	.scan_finished = pm8001_scan_finished,
 	.scan_start = pm8001_scan_start,
 	.change_queue_depth = sas_change_queue_depth,
-	.change_queue_type = sas_change_queue_type,
 	.bios_param = sas_bios_param,
 	.can_queue = 1,
 	.cmd_per_lun = 1,
@@ -4251,7 +4251,6 @@ static struct scsi_host_template pmcraid_host_template = {
 	.slave_configure = pmcraid_slave_configure,
 	.slave_destroy = pmcraid_slave_destroy,
 	.change_queue_depth = pmcraid_change_queue_depth,
-	.change_queue_type = scsi_change_queue_type,
 	.can_queue = PMCRAID_MAX_IO_CMD,
 	.this_id = -1,
 	.sg_tablesize = PMCRAID_MAX_IOADLS,
@@ -3237,8 +3237,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
 	struct fc_rport *rport;
 	unsigned long flags;

-	qla2x00_rport_del(fcport);
-
 	rport_ids.node_name = wwn_to_u64(fcport->node_name);
 	rport_ids.port_name = wwn_to_u64(fcport->port_name);
 	rport_ids.port_id = fcport->d_id.b.domain << 16 |
@@ -258,7 +258,6 @@ struct scsi_host_template qla2xxx_driver_template = {
 	.scan_finished = qla2xxx_scan_finished,
 	.scan_start = qla2xxx_scan_start,
 	.change_queue_depth = scsi_change_queue_depth,
-	.change_queue_type = scsi_change_queue_type,
 	.this_id = -1,
 	.cmd_per_lun = 3,
 	.use_clustering = ENABLE_CLUSTERING,
@@ -3218,25 +3218,25 @@ static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,

 	switch (task_codes) {
 	case ATIO_SIMPLE_QUEUE:
-		fcp_task_attr = MSG_SIMPLE_TAG;
+		fcp_task_attr = TCM_SIMPLE_TAG;
 		break;
 	case ATIO_HEAD_OF_QUEUE:
-		fcp_task_attr = MSG_HEAD_TAG;
+		fcp_task_attr = TCM_HEAD_TAG;
 		break;
 	case ATIO_ORDERED_QUEUE:
-		fcp_task_attr = MSG_ORDERED_TAG;
+		fcp_task_attr = TCM_ORDERED_TAG;
 		break;
 	case ATIO_ACA_QUEUE:
-		fcp_task_attr = MSG_ACA_TAG;
+		fcp_task_attr = TCM_ACA_TAG;
 		break;
 	case ATIO_UNTAGGED:
-		fcp_task_attr = MSG_SIMPLE_TAG;
+		fcp_task_attr = TCM_SIMPLE_TAG;
 		break;
 	default:
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
 		    "qla_target: unknown task code %x, use ORDERED instead\n",
 		    task_codes);
-		fcp_task_attr = MSG_ORDERED_TAG;
+		fcp_task_attr = TCM_ORDERED_TAG;
 		break;
 	}

@@ -739,33 +739,11 @@ int scsi_track_queue_full(struct scsi_device *sdev, int depth)

 	if (sdev->last_queue_full_count <= 10)
 		return 0;
-	if (sdev->last_queue_full_depth < 8) {
-		/* Drop back to untagged */
-		scsi_set_tag_type(sdev, 0);
-		scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun);
-		return -1;
-	}

 	return scsi_change_queue_depth(sdev, depth);
 }
 EXPORT_SYMBOL(scsi_track_queue_full);

-/**
- * scsi_change_queue_type() - Change a device's queue type
- * @sdev:     The SCSI device whose queue depth is to change
- * @tag_type: Identifier for queue type
- */
-int scsi_change_queue_type(struct scsi_device *sdev, int tag_type)
-{
-	if (!sdev->tagged_supported)
-		return 0;
-
-	scsi_set_tag_type(sdev, tag_type);
-	return tag_type;
-
-}
-EXPORT_SYMBOL(scsi_change_queue_type);
-
 /**
  * scsi_vpd_inquiry - Request a device provide us with a VPD page
  * @sdev: The device to ask
@@ -128,7 +128,6 @@ static const char *scsi_debug_version_date = "20141022";
 #define DEF_REMOVABLE false
 #define DEF_SCSI_LEVEL 6 /* INQUIRY, byte2 [6->SPC-4] */
 #define DEF_SECTOR_SIZE 512
-#define DEF_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
 #define DEF_UNMAP_ALIGNMENT 0
 #define DEF_UNMAP_GRANULARITY 1
 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
|
||||||
UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ);
|
UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ);
|
||||||
if (debug)
|
if (debug)
|
||||||
cp = "capacity data changed";
|
cp = "capacity data changed";
|
||||||
|
break;
|
||||||
default:
|
default:
|
||||||
pr_warn("%s: unexpected unit attention code=%d\n",
|
pr_warn("%s: unexpected unit attention code=%d\n",
|
||||||
__func__, k);
|
__func__, k);
|
||||||
|
@@ -3045,18 +3045,12 @@ resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 	u8 num;
 	unsigned long iflags;
 	int ret;
+	int retval = 0;

-	lba = get_unaligned_be32(cmd + 2);
+	lba = get_unaligned_be64(cmd + 2);
 	num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
 	if (0 == num)
 		return 0; /* degenerate case, not an error */
-	dnum = 2 * num;
-	arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
-	if (NULL == arr) {
-		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
-				INSUFF_RES_ASCQ);
-		return check_condition_result;
-	}
 	if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
 	    (cmd[1] & 0xe0)) {
 		mk_sense_invalid_opcode(scp);
|
||||||
mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
|
mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
|
||||||
return check_condition_result;
|
return check_condition_result;
|
||||||
}
|
}
|
||||||
|
dnum = 2 * num;
|
||||||
|
arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
|
||||||
|
if (NULL == arr) {
|
||||||
|
mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
|
||||||
|
INSUFF_RES_ASCQ);
|
||||||
|
return check_condition_result;
|
||||||
|
}
|
||||||
|
|
||||||
write_lock_irqsave(&atomic_rw, iflags);
|
write_lock_irqsave(&atomic_rw, iflags);
|
||||||
|
|
||||||
|
@@ -3089,24 +3090,24 @@ resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 	ret = do_device_access(scp, 0, dnum, true);
 	fake_storep = fake_storep_hold;
 	if (ret == -1) {
-		write_unlock_irqrestore(&atomic_rw, iflags);
-		kfree(arr);
-		return DID_ERROR << 16;
+		retval = DID_ERROR << 16;
+		goto cleanup;
 	} else if ((ret < (dnum * lb_size)) &&
 		   (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
 			    "indicated=%u, IO sent=%d bytes\n", my_name,
 			    dnum * lb_size, ret);
 	if (!comp_write_worker(lba, num, arr)) {
-		write_unlock_irqrestore(&atomic_rw, iflags);
-		kfree(arr);
 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
-		return check_condition_result;
+		retval = check_condition_result;
+		goto cleanup;
 	}
 	if (scsi_debug_lbp())
 		map_region(lba, num);
+cleanup:
 	write_unlock_irqrestore(&atomic_rw, iflags);
-	return 0;
+	kfree(arr);
+	return retval;
 }

 struct unmap_block_desc {
@@ -4438,6 +4439,7 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
 		struct sdebug_host_info *sdhp;
 		struct sdebug_dev_info *dp;

+		spin_lock(&sdebug_host_list_lock);
 		list_for_each_entry(sdhp, &sdebug_host_list,
 				    host_list) {
 			list_for_each_entry(dp, &sdhp->dev_info_list,
|
||||||
dp->uas_bm);
|
dp->uas_bm);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
spin_unlock(&sdebug_host_list_lock);
|
||||||
}
|
}
|
||||||
return count;
|
return count;
|
||||||
}
|
}
|
||||||
|
@@ -4987,32 +4990,6 @@ sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
 	return sdev->queue_depth;
 }

-static int
-sdebug_change_qtype(struct scsi_device *sdev, int qtype)
-{
-	qtype = scsi_change_queue_type(sdev, qtype);
-	if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
-		const char *cp;
-
-		switch (qtype) {
-		case 0:
-			cp = "untagged";
-			break;
-		case MSG_SIMPLE_TAG:
-			cp = "simple tags";
-			break;
-		case MSG_ORDERED_TAG:
-			cp = "ordered tags";
-			break;
-		default:
-			cp = "unknown";
-			break;
-		}
-		sdev_printk(KERN_INFO, sdev, "%s: to %s\n", __func__, cp);
-	}
-	return qtype;
-}
-
 static int
 check_inject(struct scsi_cmnd *scp)
 {
@@ -5212,7 +5189,6 @@ static struct scsi_host_template sdebug_driver_template = {
 	.ioctl = scsi_debug_ioctl,
 	.queuecommand = sdebug_queuecommand_lock_or_not,
 	.change_queue_depth = sdebug_change_qdepth,
-	.change_queue_type = sdebug_change_qtype,
 	.eh_abort_handler = scsi_debug_abort,
 	.eh_device_reset_handler = scsi_debug_device_reset,
 	.eh_target_reset_handler = scsi_debug_target_reset,
@@ -738,30 +738,12 @@ store_queue_type_field(struct device *dev, struct device_attribute *attr,
 		       const char *buf, size_t count)
 {
 	struct scsi_device *sdev = to_scsi_device(dev);
-	struct scsi_host_template *sht = sdev->host->hostt;
-	int tag_type = 0, retval;
-	int prev_tag_type = scsi_get_tag_type(sdev);

-	if (!sdev->tagged_supported || !sht->change_queue_type)
+	if (!sdev->tagged_supported)
 		return -EINVAL;

-	/*
-	 * We're never issueing order tags these days, but allow the value
-	 * for backwards compatibility.
-	 */
-	if (strncmp(buf, "ordered", 7) == 0 ||
-	    strncmp(buf, "simple", 6) == 0)
-		tag_type = MSG_SIMPLE_TAG;
-	else if (strncmp(buf, "none", 4) != 0)
-		return -EINVAL;
-
-	if (tag_type == prev_tag_type)
-		return count;
-
-	retval = sht->change_queue_type(sdev, tag_type);
-	if (retval < 0)
-		return retval;
-
+	sdev_printk(KERN_INFO, sdev,
+		    "ignoring write to deprecated queue_type attribute");
 	return count;
 }

@@ -938,10 +920,6 @@ static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj,
 	    !sdev->host->hostt->change_queue_depth)
 		return 0;

-	if (attr == &dev_attr_queue_type.attr &&
-	    !sdev->host->hostt->change_queue_type)
-		return S_IRUGO;
-
 	return attr->mode;
 }

@@ -1221,7 +1221,7 @@ EXPORT_SYMBOL_GPL(spi_populate_ppr_msg);
 int spi_populate_tag_msg(unsigned char *msg, struct scsi_cmnd *cmd)
 {
 	if (cmd->flags & SCMD_TAGGED) {
-		*msg++ = MSG_SIMPLE_TAG;
+		*msg++ = SIMPLE_QUEUE_TAG;
 		*msg++ = cmd->request->tag;
 		return 2;
 	}
@@ -1688,13 +1688,12 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 	if (ret == -EAGAIN) {
 		/* no more space */

-		if (cmd_request->bounce_sgl_count) {
+		if (cmd_request->bounce_sgl_count)
 			destroy_bounce_buffer(cmd_request->bounce_sgl,
 				cmd_request->bounce_sgl_count);

 		ret = SCSI_MLQUEUE_DEVICE_BUSY;
 		goto queue_error;
-		}
 	}

 	return 0;
@@ -943,17 +943,17 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	 */
 	if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
 	    (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
-		sam_task_attr = MSG_SIMPLE_TAG;
+		sam_task_attr = TCM_SIMPLE_TAG;
 	else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
-		sam_task_attr = MSG_ORDERED_TAG;
+		sam_task_attr = TCM_ORDERED_TAG;
 	else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
-		sam_task_attr = MSG_HEAD_TAG;
+		sam_task_attr = TCM_HEAD_TAG;
 	else if (iscsi_task_attr == ISCSI_ATTR_ACA)
-		sam_task_attr = MSG_ACA_TAG;
+		sam_task_attr = TCM_ACA_TAG;
 	else {
 		pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
-			" MSG_SIMPLE_TAG\n", iscsi_task_attr);
-		sam_task_attr = MSG_SIMPLE_TAG;
+			" TCM_SIMPLE_TAG\n", iscsi_task_attr);
+		sam_task_attr = TCM_SIMPLE_TAG;
 	}

 	cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD;
|
||||||
transport_init_se_cmd(&cmd->se_cmd,
|
transport_init_se_cmd(&cmd->se_cmd,
|
||||||
&lio_target_fabric_configfs->tf_ops,
|
&lio_target_fabric_configfs->tf_ops,
|
||||||
conn->sess->se_sess, 0, DMA_NONE,
|
conn->sess->se_sess, 0, DMA_NONE,
|
||||||
MSG_SIMPLE_TAG, cmd->sense_buffer + 2);
|
TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
|
||||||
|
|
||||||
target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
|
target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
|
||||||
sess_ref = true;
|
sess_ref = true;
|
||||||
|
|
|
@@ -168,7 +168,7 @@ static void tcm_loop_submission_work(struct work_struct *work)

 	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
 			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
-			transfer_length, MSG_SIMPLE_TAG,
+			transfer_length, TCM_SIMPLE_TAG,
 			sc->sc_data_direction, 0,
 			scsi_sglist(sc), scsi_sg_count(sc),
 			sgl_bidi, sgl_bidi_count,
@@ -248,7 +248,7 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
 	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
 	 */
 	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
-				DMA_NONE, MSG_SIMPLE_TAG,
+				DMA_NONE, TCM_SIMPLE_TAG,
 				&tl_cmd->tl_sense_buf[0]);

 	rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
@@ -385,7 +385,6 @@ static struct scsi_host_template tcm_loop_driver_template = {
 	.name = "TCM_Loopback",
 	.queuecommand = tcm_loop_queuecommand,
 	.change_queue_depth = scsi_change_queue_depth,
-	.change_queue_type = scsi_change_queue_type,
 	.eh_abort_handler = tcm_loop_abort_task,
 	.eh_device_reset_handler = tcm_loop_device_reset,
 	.eh_target_reset_handler = tcm_loop_target_reset,
@@ -1237,7 +1237,7 @@ static void sbp_handle_command(struct sbp_target_request *req)

 	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
 			      req->sense_buf, unpacked_lun, data_length,
-			      MSG_SIMPLE_TAG, data_dir, 0))
+			      TCM_SIMPLE_TAG, data_dir, 0))
 		goto err;

 	return;
@@ -1094,7 +1094,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
 	req->retries = PS_RETRY;

 	blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req,
-			(cmd->sam_task_attr == MSG_HEAD_TAG),
+			(cmd->sam_task_attr == TCM_HEAD_TAG),
 			pscsi_req_done);

 	return 0;
@@ -485,7 +485,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
 	cmd->t_data_nents_orig = cmd->t_data_nents;
 	cmd->t_data_nents = 1;

-	cmd->sam_task_attr = MSG_HEAD_TAG;
+	cmd->sam_task_attr = TCM_HEAD_TAG;
 	cmd->transport_complete_callback = compare_and_write_post;
 	/*
 	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
@@ -1357,7 +1357,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
 		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
 		 * See spc4r17 section 5.3
 		 */
-		cmd->sam_task_attr = MSG_HEAD_TAG;
+		cmd->sam_task_attr = TCM_HEAD_TAG;
 		cmd->execute_cmd = spc_emulate_inquiry;
 		break;
 	case SECURITY_PROTOCOL_IN:
@@ -1391,7 +1391,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
 		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
 		 * See spc4r17 section 5.3
 		 */
-		cmd->sam_task_attr = MSG_HEAD_TAG;
+		cmd->sam_task_attr = TCM_HEAD_TAG;
 		break;
 	case TEST_UNIT_READY:
 		cmd->execute_cmd = spc_emulate_testunitready;
@@ -1159,7 +1159,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
 	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
 		return 0;
 
-	if (cmd->sam_task_attr == MSG_ACA_TAG) {
+	if (cmd->sam_task_attr == TCM_ACA_TAG) {
 		pr_debug("SAM Task Attribute ACA"
 			" emulation is not supported\n");
 		return TCM_INVALID_CDB_FIELD;
@@ -1531,7 +1531,7 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
 	BUG_ON(!se_tpg);
 
 	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
-			      0, DMA_NONE, MSG_SIMPLE_TAG, sense);
+			      0, DMA_NONE, TCM_SIMPLE_TAG, sense);
 	/*
 	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
 	 * allocation failure.
@@ -1718,12 +1718,12 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
 	 * to allow the passed struct se_cmd list of tasks to the front of the list.
 	 */
 	switch (cmd->sam_task_attr) {
-	case MSG_HEAD_TAG:
+	case TCM_HEAD_TAG:
 		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
 			 "se_ordered_id: %u\n",
 			 cmd->t_task_cdb[0], cmd->se_ordered_id);
 		return false;
-	case MSG_ORDERED_TAG:
+	case TCM_ORDERED_TAG:
 		atomic_inc_mb(&dev->dev_ordered_sync);
 
 		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
@@ -1828,7 +1828,7 @@ static void target_restart_delayed_cmds(struct se_device *dev)
 
 		__target_execute_cmd(cmd);
 
-		if (cmd->sam_task_attr == MSG_ORDERED_TAG)
+		if (cmd->sam_task_attr == TCM_ORDERED_TAG)
 			break;
 	}
 }
@@ -1844,18 +1844,18 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
 	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
 		return;
 
-	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
+	if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
 		atomic_dec_mb(&dev->simple_cmds);
 		dev->dev_cur_ordered_id++;
 		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
 			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
 			cmd->se_ordered_id);
-	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
+	} else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
 		dev->dev_cur_ordered_id++;
 		pr_debug("Incremented dev_cur_ordered_id: %u for"
 			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
 			cmd->se_ordered_id);
-	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
+	} else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
 		atomic_dec_mb(&dev->dev_ordered_sync);
 
 		dev->dev_cur_ordered_id++;
@@ -554,17 +554,17 @@ static void ft_send_work(struct work_struct *work)
 	 */
 	switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
 	case FCP_PTA_HEADQ:
-		task_attr = MSG_HEAD_TAG;
+		task_attr = TCM_HEAD_TAG;
 		break;
 	case FCP_PTA_ORDERED:
-		task_attr = MSG_ORDERED_TAG;
+		task_attr = TCM_ORDERED_TAG;
 		break;
 	case FCP_PTA_ACA:
-		task_attr = MSG_ACA_TAG;
+		task_attr = TCM_ACA_TAG;
 		break;
 	case FCP_PTA_SIMPLE: /* Fallthrough */
 	default:
-		task_attr = MSG_SIMPLE_TAG;
+		task_attr = TCM_SIMPLE_TAG;
 	}
 
 	fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
@@ -1131,19 +1131,19 @@ static int usbg_submit_command(struct f_uas *fu,
 
 	switch (cmd_iu->prio_attr & 0x7) {
 	case UAS_HEAD_TAG:
-		cmd->prio_attr = MSG_HEAD_TAG;
+		cmd->prio_attr = TCM_HEAD_TAG;
 		break;
 	case UAS_ORDERED_TAG:
-		cmd->prio_attr = MSG_ORDERED_TAG;
+		cmd->prio_attr = TCM_ORDERED_TAG;
 		break;
 	case UAS_ACA:
-		cmd->prio_attr = MSG_ACA_TAG;
+		cmd->prio_attr = TCM_ACA_TAG;
 		break;
 	default:
 		pr_debug_once("Unsupported prio_attr: %02x.\n",
 				cmd_iu->prio_attr);
 	case UAS_SIMPLE_TAG:
-		cmd->prio_attr = MSG_SIMPLE_TAG;
+		cmd->prio_attr = TCM_SIMPLE_TAG;
 		break;
 	}
 
@@ -1240,7 +1240,7 @@ static int bot_submit_command(struct f_uas *fu,
 		goto err;
 	}
 
-	cmd->prio_attr = MSG_SIMPLE_TAG;
+	cmd->prio_attr = TCM_SIMPLE_TAG;
 	se_cmd = &cmd->se_cmd;
 	cmd->unpacked_lun = cbw->Lun;
 	cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
@@ -606,7 +606,7 @@ static void scsiback_device_action(struct vscsibk_pend *pending_req,
 	init_waitqueue_head(&tmr->tmr_wait);
 
 	transport_init_se_cmd(se_cmd, tpg->se_tpg.se_tpg_tfo,
-		tpg->tpg_nexus->tvn_se_sess, 0, DMA_NONE, MSG_SIMPLE_TAG,
+		tpg->tpg_nexus->tvn_se_sess, 0, DMA_NONE, TCM_SIMPLE_TAG,
 		&pending_req->sense_buffer[0]);
 
 	rc = core_tmr_alloc_req(se_cmd, tmr, act, GFP_KERNEL);
@@ -688,7 +688,6 @@ extern int sas_queuecommand(struct Scsi_Host * ,struct scsi_cmnd *);
 extern int sas_target_alloc(struct scsi_target *);
 extern int sas_slave_configure(struct scsi_device *);
 extern int sas_change_queue_depth(struct scsi_device *, int new_depth);
-extern int sas_change_queue_type(struct scsi_device *, int qt);
 extern int sas_bios_param(struct scsi_device *,
 			  struct block_device *,
 			  sector_t capacity, int *hsc);
@@ -277,19 +277,6 @@ struct scsi_host_template {
 	 */
 	int (* change_queue_depth)(struct scsi_device *, int);
 
-	/*
-	 * Fill in this function to allow the changing of tag types
-	 * (this also allows the enabling/disabling of tag command
-	 * queueing). An error should only be returned if something
-	 * went wrong in the driver while trying to set the tag type.
-	 * If the driver doesn't support the requested tag type, then
-	 * it should set the closest type it does support without
-	 * returning an error. Returns the actual tag type set.
-	 *
-	 * Status: OPTIONAL
-	 */
-	int (* change_queue_type)(struct scsi_device *, int);
-
 	/*
 	 * This function determines the BIOS parameters for a given
 	 * harddisk. These tend to be numbers that are made up by
@@ -6,46 +6,10 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 
-#define MSG_SIMPLE_TAG	0x20
-#define MSG_HEAD_TAG	0x21
-#define MSG_ORDERED_TAG	0x22
-#define MSG_ACA_TAG	0x24	/* unsupported */
-
 #define SCSI_NO_TAG	(-1)	/* identify no tag in use */
 
 
 #ifdef CONFIG_BLOCK
-
-int scsi_change_queue_type(struct scsi_device *sdev, int tag_type);
-
-/**
- * scsi_get_tag_type - get the type of tag the device supports
- * @sdev: the scsi device
- */
-static inline int scsi_get_tag_type(struct scsi_device *sdev)
-{
-	if (!sdev->tagged_supported)
-		return 0;
-	if (sdev->simple_tags)
-		return MSG_SIMPLE_TAG;
-	return 0;
-}
-
-static inline void scsi_set_tag_type(struct scsi_device *sdev, int tag)
-{
-	switch (tag) {
-	case MSG_ORDERED_TAG:
-	case MSG_SIMPLE_TAG:
-		sdev->simple_tags = 1;
-		break;
-	case 0:
-		/* fall through */
-	default:
-		sdev->simple_tags = 0;
-		break;
-	}
-}
-
 static inline struct scsi_cmnd *scsi_mq_find_tag(struct Scsi_Host *shost,
 		int unique_tag)
 {
@@ -476,6 +476,12 @@ struct se_dif_v1_tuple {
 	__be32 ref_tag;
 };
 
+/* for sam_task_attr */
+#define TCM_SIMPLE_TAG	0x20
+#define TCM_HEAD_TAG	0x21
+#define TCM_ORDERED_TAG	0x22
+#define TCM_ACA_TAG	0x24
+
 struct se_cmd {
 	/* SAM response code being sent to initiator */
 	u8 scsi_status;
@@ -109,10 +109,10 @@
 
 #define show_task_attribute_name(val) \
 	__print_symbolic(val, \
-		{ MSG_SIMPLE_TAG, "SIMPLE" }, \
-		{ MSG_HEAD_TAG, "HEAD" }, \
-		{ MSG_ORDERED_TAG, "ORDERED" }, \
-		{ MSG_ACA_TAG, "ACA" } )
+		{ TCM_SIMPLE_TAG, "SIMPLE" }, \
+		{ TCM_HEAD_TAG, "HEAD" }, \
+		{ TCM_ORDERED_TAG, "ORDERED" }, \
+		{ TCM_ACA_TAG, "ACA" } )
 
 #define show_scsi_status_name(val) \
 	__print_symbolic(val, \