[SCSI] qla2xxx: Consolidation of SRB processing.

Rework the structures related to SRB processing to minimize the memory
allocations per I/O and to manage the resources associated with each I/O,
and its completion, from common routines.

Signed-off-by: Giridhar Malavali <giridhar.malavali@qlogic.com>
Signed-off-by: Chad Dupuis <chad.dupuis@qlogic.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Author: Giridhar Malavali, 2012-02-09 11:15:36 -08:00 (committed by James Bottomley)
commit 9ba56b95a5
parent 69e5f1ea61
11 changed files with 481 additions and 624 deletions
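
For orientation, a minimal, self-contained sketch of the pattern this patch
introduces: a single SRB allocation that carries its own type, name, IOCB
count and per-request done/free callbacks, so common completion code can
finish any request by calling sp->done(). The types and names below
(sketch_srb, sketch_job_done, sketch_sp_free) are simplified stand-ins, not
the driver's definitions.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in SRB: type/name/iocbs plus per-request callbacks, mirroring the
 * shape of the reworked srb_t (simplified, not the driver struct). */
struct sketch_srb {
	int   type;                        /* SRB_* command type        */
	char *name;                        /* used for tracing          */
	int   iocbs;                       /* IOCBs this request needs  */
	void (*done)(void *vha, void *sp, int res);
	void (*free)(void *vha, void *sp);
};

static void sketch_sp_free(void *vha, void *ptr)
{
	struct sketch_srb *sp = ptr;

	(void)vha;
	printf("free %s\n", sp->name);
	free(sp);                          /* driver returns sp to its mempool */
}

static void sketch_job_done(void *vha, void *ptr, int res)
{
	struct sketch_srb *sp = ptr;

	printf("done %s res=%d\n", sp->name, res);
	sp->free(vha, sp);                 /* completion hands back the SRB */
}

int main(void)
{
	/* Submission side: one allocation, fill in type/name/callbacks. */
	struct sketch_srb *sp = calloc(1, sizeof(*sp));

	if (!sp)
		return 1;
	sp->type  = 5;                     /* e.g. a CT pass-through    */
	sp->name  = "bsg_ct";
	sp->iocbs = 1;
	sp->done  = sketch_job_done;
	sp->free  = sketch_sp_free;

	/* Completion side: a common routine only has to call ->done(). */
	sp->done(NULL, sp, 0);
	return 0;
}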


@ -11,29 +11,36 @@
#include <linux/delay.h>
/* BSG support for ELS/CT pass through */
inline srb_t *
qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
void
qla2x00_bsg_job_done(void *data, void *ptr, int res)
{
srb_t *sp;
srb_t *sp = (srb_t *)ptr;
struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
struct fc_bsg_job *bsg_job = sp->u.bsg_job;
bsg_job->reply->result = res;
bsg_job->job_done(bsg_job);
sp->free(vha, sp);
}
void
qla2x00_bsg_sp_free(void *data, void *ptr)
{
srb_t *sp = (srb_t *)ptr;
struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
struct fc_bsg_job *bsg_job = sp->u.bsg_job;
struct qla_hw_data *ha = vha->hw;
struct srb_ctx *ctx;
sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
if (!sp)
goto done;
ctx = kzalloc(size, GFP_KERNEL);
if (!ctx) {
mempool_free(sp, ha->srb_mempool);
sp = NULL;
goto done;
}
dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
memset(sp, 0, sizeof(*sp));
sp->fcport = fcport;
sp->ctx = ctx;
ctx->iocbs = 1;
done:
return sp;
dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
if (sp->type == SRB_CT_CMD ||
sp->type == SRB_ELS_CMD_HST)
kfree(sp->fcport);
mempool_free(sp, vha->hw->srb_mempool);
}
int
@ -217,6 +224,7 @@ exit_fcp_prio_cfg:
bsg_job->job_done(bsg_job);
return ret;
}
static int
qla2x00_process_els(struct fc_bsg_job *bsg_job)
{
@ -230,7 +238,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
int req_sg_cnt, rsp_sg_cnt;
int rval = (DRIVER_ERROR << 16);
uint16_t nextlid = 0;
struct srb_ctx *els;
if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
rport = bsg_job->rport;
@ -337,20 +344,21 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
}
/* Alloc SRB structure */
sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp) {
rval = -ENOMEM;
goto done_unmap_sg;
}
els = sp->ctx;
els->type =
sp->type =
(bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
els->name =
sp->name =
(bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
"bsg_els_rpt" : "bsg_els_hst");
els->u.bsg_job = bsg_job;
sp->u.bsg_job = bsg_job;
sp->free = qla2x00_bsg_sp_free;
sp->done = qla2x00_bsg_job_done;
ql_dbg(ql_dbg_user, vha, 0x700a,
"bsg rqst type: %s els type: %x - loop-id=%x "
@ -362,7 +370,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x700e,
"qla2x00_start_sp failed = %d\n", rval);
kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
rval = -EIO;
goto done_unmap_sg;
@ -409,7 +416,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
uint16_t loop_id;
struct fc_port *fcport;
char *type = "FC_BSG_HST_CT";
struct srb_ctx *ct;
req_sg_cnt =
dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
@ -486,19 +492,20 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
fcport->loop_id = loop_id;
/* Alloc SRB structure */
sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp) {
ql_log(ql_log_warn, vha, 0x7015,
"qla2x00_get_ctx_bsg_sp failed.\n");
"qla2x00_get_sp failed.\n");
rval = -ENOMEM;
goto done_free_fcport;
}
ct = sp->ctx;
ct->type = SRB_CT_CMD;
ct->name = "bsg_ct";
ct->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
ct->u.bsg_job = bsg_job;
sp->type = SRB_CT_CMD;
sp->name = "bsg_ct";
sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
sp->u.bsg_job = bsg_job;
sp->free = qla2x00_bsg_sp_free;
sp->done = qla2x00_bsg_job_done;
ql_dbg(ql_dbg_user, vha, 0x7016,
"bsg rqst type: %s else type: %x - "
@ -511,7 +518,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7017,
"qla2x00_start_sp failed=%d.\n", rval);
kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
rval = -EIO;
goto done_free_fcport;
@ -1669,7 +1675,6 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
int cnt, que;
unsigned long flags;
struct req_que *req;
struct srb_ctx *sp_bsg;
/* find the bsg job from the active list of commands */
spin_lock_irqsave(&ha->hardware_lock, flags);
@ -1681,11 +1686,9 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
sp_bsg = sp->ctx;
if (((sp_bsg->type == SRB_CT_CMD) ||
(sp_bsg->type == SRB_ELS_CMD_HST))
&& (sp_bsg->u.bsg_job == bsg_job)) {
if (((sp->type == SRB_CT_CMD) ||
(sp->type == SRB_ELS_CMD_HST))
&& (sp->u.bsg_job == bsg_job)) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (ha->isp_ops->abort_command(sp)) {
ql_log(ql_log_warn, vha, 0x7089,
@ -1715,7 +1718,6 @@ done:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (bsg_job->request->msgcode == FC_BSG_HST_CT)
kfree(sp->fcport);
kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
return 0;
}


@ -14,10 +14,11 @@
* | Module Init and Probe | 0x011f | 0x4b,0xfa |
* | Mailbox commands | 0x1139 | 0x112c-0x112e |
* | Device Discovery | 0x2084 | |
* | Queue Command and IO tracing | 0x302f | 0x3008 |
* | Queue Command and IO tracing | 0x302f | 0x3006,0x3008 |
* | | | 0x302d-0x302e |
* | DPC Thread | 0x401c | |
* | Async Events | 0x5057 | 0x5052 |
* | Async Events | 0x5057 | 0x502b-0x502f |
* | | | 0x5047,0x5052 |
* | Timer Routines | 0x6011 | 0x600e-0x600f |
* | User Space Interactions | 0x709e | 0x7018,0x702e |
* | | | 0x7039,0x7045 |


@ -202,20 +202,12 @@ struct sd_dif_tuple {
/*
* SCSI Request Block
*/
typedef struct srb {
atomic_t ref_count;
struct fc_port *fcport;
uint32_t handle;
struct srb_cmd {
struct scsi_cmnd *cmd; /* Linux SCSI command pkt */
uint16_t flags;
uint32_t request_sense_length;
uint8_t *request_sense_ptr;
void *ctx;
} srb_t;
};
/*
* SRB flag definitions
@ -254,10 +246,7 @@ struct srb_iocb {
} u;
struct timer_list timer;
void (*done)(srb_t *);
void (*free)(srb_t *);
void (*timeout)(srb_t *);
void (*timeout)(void *);
};
/* Values for srb_ctx type */
@ -268,16 +257,37 @@ struct srb_iocb {
#define SRB_CT_CMD 5
#define SRB_ADISC_CMD 6
#define SRB_TM_CMD 7
#define SRB_SCSI_CMD 8
struct srb_ctx {
typedef struct srb {
atomic_t ref_count;
struct fc_port *fcport;
uint32_t handle;
uint16_t flags;
uint16_t type;
char *name;
int iocbs;
union {
struct srb_iocb *iocb_cmd;
struct srb_iocb iocb_cmd;
struct fc_bsg_job *bsg_job;
struct srb_cmd scmd;
} u;
};
void (*done)(void *, void *, int);
void (*free)(void *, void *);
} srb_t;
#define GET_CMD_SP(sp) (sp->u.scmd.cmd)
#define SET_CMD_SP(sp, cmd) (sp->u.scmd.cmd = cmd)
#define GET_CMD_CTX_SP(sp) (sp->u.scmd.ctx)
#define GET_CMD_SENSE_LEN(sp) \
(sp->u.scmd.request_sense_length)
#define SET_CMD_SENSE_LEN(sp, len) \
(sp->u.scmd.request_sense_length = len)
#define GET_CMD_SENSE_PTR(sp) \
(sp->u.scmd.request_sense_ptr)
#define SET_CMD_SENSE_PTR(sp, ptr) \
(sp->u.scmd.request_sense_ptr = ptr)
struct msg_echo_lb {
dma_addr_t send_dma;


@ -71,8 +71,6 @@ extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern void qla2x00_async_tm_cmd_done(struct scsi_qla_host *, fc_port_t *,
struct srb_iocb *);
extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
@ -156,8 +154,7 @@ extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *);
extern void qla2x00_sp_compl(struct qla_hw_data *, srb_t *);
extern void qla2x00_sp_free_dma(void *, void *);
extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
@ -542,6 +539,10 @@ extern int qla82xx_restart_isp(scsi_qla_host_t *);
/* IOCB related functions */
extern int qla82xx_start_scsi(srb_t *);
extern void qla2x00_sp_free(void *, void *);
extern void qla2x00_sp_timeout(unsigned long);
extern void qla2x00_bsg_job_done(void *, void *, int);
extern void qla2x00_bsg_sp_free(void *, void *);
/* Interrupt related */
extern irqreturn_t qla82xx_intr_handler(int, void *);


@ -41,11 +41,10 @@ static int qla25xx_init_queues(struct qla_hw_data *);
/* SRB Extensions ---------------------------------------------------------- */
static void
qla2x00_ctx_sp_timeout(unsigned long __data)
void
qla2x00_sp_timeout(unsigned long __data)
{
srb_t *sp = (srb_t *)__data;
struct srb_ctx *ctx;
struct srb_iocb *iocb;
fc_port_t *fcport = sp->fcport;
struct qla_hw_data *ha = fcport->vha->hw;
@ -55,79 +54,25 @@ qla2x00_ctx_sp_timeout(unsigned long __data)
spin_lock_irqsave(&ha->hardware_lock, flags);
req = ha->req_q_map[0];
req->outstanding_cmds[sp->handle] = NULL;
ctx = sp->ctx;
iocb = ctx->u.iocb_cmd;
iocb = &sp->u.iocb_cmd;
iocb->timeout(sp);
iocb->free(sp);
sp->free(fcport->vha, sp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void
qla2x00_ctx_sp_free(srb_t *sp)
void
qla2x00_sp_free(void *data, void *ptr)
{
struct srb_ctx *ctx = sp->ctx;
struct srb_iocb *iocb = ctx->u.iocb_cmd;
struct scsi_qla_host *vha = sp->fcport->vha;
srb_t *sp = (srb_t *)ptr;
struct srb_iocb *iocb = &sp->u.iocb_cmd;
struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
del_timer(&iocb->timer);
kfree(iocb);
kfree(ctx);
mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
mempool_free(sp, vha->hw->srb_mempool);
QLA_VHA_MARK_NOT_BUSY(vha);
}
inline srb_t *
qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
unsigned long tmo)
{
srb_t *sp = NULL;
struct qla_hw_data *ha = vha->hw;
struct srb_ctx *ctx;
struct srb_iocb *iocb;
uint8_t bail;
QLA_VHA_MARK_BUSY(vha, bail);
if (bail)
return NULL;
sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
if (!sp)
goto done;
ctx = kzalloc(size, GFP_KERNEL);
if (!ctx) {
mempool_free(sp, ha->srb_mempool);
sp = NULL;
goto done;
}
iocb = kzalloc(sizeof(struct srb_iocb), GFP_KERNEL);
if (!iocb) {
mempool_free(sp, ha->srb_mempool);
sp = NULL;
kfree(ctx);
goto done;
}
memset(sp, 0, sizeof(*sp));
sp->fcport = fcport;
sp->ctx = ctx;
ctx->iocbs = 1;
ctx->u.iocb_cmd = iocb;
iocb->free = qla2x00_ctx_sp_free;
init_timer(&iocb->timer);
if (!tmo)
goto done;
iocb->timer.expires = jiffies + tmo * HZ;
iocb->timer.data = (unsigned long)sp;
iocb->timer.function = qla2x00_ctx_sp_timeout;
add_timer(&iocb->timer);
done:
if (!sp)
QLA_VHA_MARK_NOT_BUSY(vha);
return sp;
}
/* Asynchronous Login/Logout Routines -------------------------------------- */
static inline unsigned long
@ -149,19 +94,19 @@ qla2x00_get_async_timeout(struct scsi_qla_host *vha)
}
static void
qla2x00_async_iocb_timeout(srb_t *sp)
qla2x00_async_iocb_timeout(void *data)
{
srb_t *sp = (srb_t *)data;
fc_port_t *fcport = sp->fcport;
struct srb_ctx *ctx = sp->ctx;
ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
"Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n",
ctx->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
fcport->flags &= ~FCF_ASYNC_SENT;
if (ctx->type == SRB_LOGIN_CMD) {
struct srb_iocb *lio = ctx->u.iocb_cmd;
if (sp->type == SRB_LOGIN_CMD) {
struct srb_iocb *lio = &sp->u.iocb_cmd;
qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
/* Retry as needed. */
lio->u.logio.data[0] = MBS_COMMAND_ERROR;
@ -173,14 +118,16 @@ qla2x00_async_iocb_timeout(srb_t *sp)
}
static void
qla2x00_async_login_ctx_done(srb_t *sp)
qla2x00_async_login_sp_done(void *data, void *ptr, int res)
{
struct srb_ctx *ctx = sp->ctx;
struct srb_iocb *lio = ctx->u.iocb_cmd;
srb_t *sp = (srb_t *)ptr;
struct srb_iocb *lio = &sp->u.iocb_cmd;
struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
lio->u.logio.data);
lio->free(sp);
if (!test_bit(UNLOADING, &vha->dpc_flags))
qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
lio->u.logio.data);
sp->free(sp->fcport->vha, sp);
}
int
@ -188,22 +135,21 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
{
srb_t *sp;
struct srb_ctx *ctx;
struct srb_iocb *lio;
int rval;
rval = QLA_FUNCTION_FAILED;
sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
qla2x00_get_async_timeout(vha) + 2);
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
ctx = sp->ctx;
ctx->type = SRB_LOGIN_CMD;
ctx->name = "login";
lio = ctx->u.iocb_cmd;
sp->type = SRB_LOGIN_CMD;
sp->name = "login";
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
lio = &sp->u.iocb_cmd;
lio->timeout = qla2x00_async_iocb_timeout;
lio->done = qla2x00_async_login_ctx_done;
sp->done = qla2x00_async_login_sp_done;
lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
lio->u.logio.flags |= SRB_LOGIN_RETRIED;
@ -219,42 +165,43 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
return rval;
done_free_sp:
lio->free(sp);
sp->free(fcport->vha, sp);
done:
return rval;
}
static void
qla2x00_async_logout_ctx_done(srb_t *sp)
qla2x00_async_logout_sp_done(void *data, void *ptr, int res)
{
struct srb_ctx *ctx = sp->ctx;
struct srb_iocb *lio = ctx->u.iocb_cmd;
srb_t *sp = (srb_t *)ptr;
struct srb_iocb *lio = &sp->u.iocb_cmd;
struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
lio->u.logio.data);
lio->free(sp);
if (!test_bit(UNLOADING, &vha->dpc_flags))
qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
lio->u.logio.data);
sp->free(sp->fcport->vha, sp);
}
int
qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
srb_t *sp;
struct srb_ctx *ctx;
struct srb_iocb *lio;
int rval;
rval = QLA_FUNCTION_FAILED;
sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
qla2x00_get_async_timeout(vha) + 2);
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
ctx = sp->ctx;
ctx->type = SRB_LOGOUT_CMD;
ctx->name = "logout";
lio = ctx->u.iocb_cmd;
sp->type = SRB_LOGOUT_CMD;
sp->name = "logout";
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
lio = &sp->u.iocb_cmd;
lio->timeout = qla2x00_async_iocb_timeout;
lio->done = qla2x00_async_logout_ctx_done;
sp->done = qla2x00_async_logout_sp_done;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
goto done_free_sp;
@ -266,20 +213,22 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
return rval;
done_free_sp:
lio->free(sp);
sp->free(fcport->vha, sp);
done:
return rval;
}
static void
qla2x00_async_adisc_ctx_done(srb_t *sp)
qla2x00_async_adisc_sp_done(void *data, void *ptr, int res)
{
struct srb_ctx *ctx = sp->ctx;
struct srb_iocb *lio = ctx->u.iocb_cmd;
srb_t *sp = (srb_t *)ptr;
struct srb_iocb *lio = &sp->u.iocb_cmd;
struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
lio->u.logio.data);
lio->free(sp);
if (!test_bit(UNLOADING, &vha->dpc_flags))
qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
lio->u.logio.data);
sp->free(sp->fcport->vha, sp);
}
int
@ -287,22 +236,21 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
{
srb_t *sp;
struct srb_ctx *ctx;
struct srb_iocb *lio;
int rval;
rval = QLA_FUNCTION_FAILED;
sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
qla2x00_get_async_timeout(vha) + 2);
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
ctx = sp->ctx;
ctx->type = SRB_ADISC_CMD;
ctx->name = "adisc";
lio = ctx->u.iocb_cmd;
sp->type = SRB_ADISC_CMD;
sp->name = "adisc";
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
lio = &sp->u.iocb_cmd;
lio->timeout = qla2x00_async_iocb_timeout;
lio->done = qla2x00_async_adisc_ctx_done;
sp->done = qla2x00_async_adisc_sp_done;
if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
lio->u.logio.flags |= SRB_LOGIN_RETRIED;
rval = qla2x00_start_sp(sp);
@ -316,46 +264,62 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
return rval;
done_free_sp:
lio->free(sp);
sp->free(fcport->vha, sp);
done:
return rval;
}
static void
qla2x00_async_tm_cmd_ctx_done(srb_t *sp)
qla2x00_async_tm_cmd_done(void *data, void *ptr, int res)
{
struct srb_ctx *ctx = sp->ctx;
struct srb_iocb *iocb = (struct srb_iocb *)ctx->u.iocb_cmd;
srb_t *sp = (srb_t *)ptr;
struct srb_iocb *iocb = &sp->u.iocb_cmd;
struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
uint32_t flags;
uint16_t lun;
int rval;
qla2x00_async_tm_cmd_done(sp->fcport->vha, sp->fcport, iocb);
iocb->free(sp);
if (!test_bit(UNLOADING, &vha->dpc_flags)) {
flags = iocb->u.tmf.flags;
lun = (uint16_t)iocb->u.tmf.lun;
/* Issue Marker IOCB */
rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
ql_dbg(ql_dbg_taskm, vha, 0x8030,
"TM IOCB failed (%x).\n", rval);
}
}
sp->free(sp->fcport->vha, sp);
}
int
qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t tm_flags, uint32_t lun,
uint32_t tag)
{
struct scsi_qla_host *vha = fcport->vha;
srb_t *sp;
struct srb_ctx *ctx;
struct srb_iocb *tcf;
int rval;
rval = QLA_FUNCTION_FAILED;
sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
qla2x00_get_async_timeout(vha) + 2);
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
ctx = sp->ctx;
ctx->type = SRB_TM_CMD;
ctx->name = "tmf";
tcf = ctx->u.iocb_cmd;
tcf->u.tmf.flags = flags;
sp->type = SRB_TM_CMD;
sp->name = "tmf";
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
tcf = &sp->u.iocb_cmd;
tcf->u.tmf.flags = tm_flags;
tcf->u.tmf.lun = lun;
tcf->u.tmf.data = tag;
tcf->timeout = qla2x00_async_iocb_timeout;
tcf->done = qla2x00_async_tm_cmd_ctx_done;
sp->done = qla2x00_async_tm_cmd_done;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
@ -368,7 +332,7 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
return rval;
done_free_sp:
tcf->free(sp);
sp->free(fcport->vha, sp);
done:
return rval;
}
@ -452,30 +416,6 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
return;
}
void
qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport,
struct srb_iocb *iocb)
{
int rval;
uint32_t flags;
uint16_t lun;
flags = iocb->u.tmf.flags;
lun = (uint16_t)iocb->u.tmf.lun;
/* Issue Marker IOCB */
rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
vha->hw->rsp_q_map[0], fcport->loop_id, lun,
flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
ql_dbg(ql_dbg_taskm, vha, 0x8030,
"TM IOCB failed (%x).\n", rval);
}
return;
}
/****************************************************************************/
/* QLogic ISP2x00 Hardware Support Functions. */
/****************************************************************************/


@ -72,16 +72,19 @@ static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
{
struct dsd_dma *dsd_ptr, *tdsd_ptr;
struct crc_context *ctx;
ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
/* clean up allocated prev pool */
list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
&((struct crc_context *)sp->ctx)->dsd_list, list) {
&ctx->dsd_list, list) {
dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
dsd_ptr->dsd_list_dma);
list_del(&dsd_ptr->list);
kfree(dsd_ptr);
}
INIT_LIST_HEAD(&((struct crc_context *)sp->ctx)->dsd_list);
INIT_LIST_HEAD(&ctx->dsd_list);
}
static inline void
@ -113,8 +116,7 @@ qla2x00_hba_err_chk_enabled(srb_t *sp)
return 0;
*
*/
switch (scsi_get_prot_op(sp->cmd)) {
switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
case SCSI_PROT_READ_STRIP:
case SCSI_PROT_WRITE_INSERT:
if (ql2xenablehba_err_chk >= 1)
@ -144,3 +146,38 @@ qla2x00_reset_active(scsi_qla_host_t *vha)
test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
}
static inline srb_t *
qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
srb_t *sp = NULL;
struct qla_hw_data *ha = vha->hw;
uint8_t bail;
QLA_VHA_MARK_BUSY(vha, bail);
if (unlikely(bail))
return NULL;
sp = mempool_alloc(ha->srb_mempool, flag);
if (!sp)
goto done;
memset(sp, 0, sizeof(*sp));
sp->fcport = fcport;
sp->iocbs = 1;
done:
if (!sp)
QLA_VHA_MARK_NOT_BUSY(vha);
return sp;
}
static inline void
qla2x00_init_timer(srb_t *sp, unsigned long tmo)
{
init_timer(&sp->u.iocb_cmd.timer);
sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
sp->u.iocb_cmd.timer.data = (unsigned long)sp;
sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
add_timer(&sp->u.iocb_cmd.timer);
sp->free = qla2x00_sp_free;
}


@ -22,18 +22,19 @@ static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
uint16_t cflags;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
cflags = 0;
/* Set transfer direction */
if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cflags = CF_WRITE;
sp->fcport->vha->hw->qla_stats.output_bytes +=
scsi_bufflen(sp->cmd);
} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
scsi_bufflen(cmd);
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cflags = CF_READ;
sp->fcport->vha->hw->qla_stats.input_bytes +=
scsi_bufflen(sp->cmd);
scsi_bufflen(cmd);
}
return (cflags);
}
@ -143,12 +144,13 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
uint8_t guard = scsi_host_get_guard(sp->cmd->device->host);
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
uint8_t guard = scsi_host_get_guard(cmd->device->host);
/* We only support T10 DIF right now */
if (guard != SHOST_DIX_GUARD_CRC) {
ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
"Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd);
"Unsupported guard: %d for cmd=%p.\n", guard, cmd);
return 0;
}
@ -156,7 +158,7 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
*fw_prot_opts = 0;
/* Translate SCSI opcode to a protection opcode */
switch (scsi_get_prot_op(sp->cmd)) {
switch (scsi_get_prot_op(cmd)) {
case SCSI_PROT_READ_STRIP:
*fw_prot_opts |= PO_MODE_DIF_REMOVE;
break;
@ -180,7 +182,7 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
break;
}
return scsi_prot_sg_count(sp->cmd);
return scsi_prot_sg_count(cmd);
}
/*
@ -201,7 +203,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
struct scatterlist *sg;
int i;
cmd = sp->cmd;
cmd = GET_CMD_SP(sp);
/* Update entry type to indicate Command Type 2 IOCB */
*((uint32_t *)(&cmd_pkt->entry_type)) =
@ -259,7 +261,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
struct scatterlist *sg;
int i;
cmd = sp->cmd;
cmd = GET_CMD_SP(sp);
/* Update entry type to indicate Command Type 3 IOCB */
*((uint32_t *)(&cmd_pkt->entry_type)) =
@ -333,7 +335,7 @@ qla2x00_start_scsi(srb_t *sp)
vha = sp->fcport->vha;
ha = vha->hw;
reg = &ha->iobase->isp;
cmd = sp->cmd;
cmd = GET_CMD_SP(sp);
req = ha->req_q_map[0];
rsp = ha->rsp_q_map[0];
/* So we know we haven't pci_map'ed anything yet */
@ -391,7 +393,7 @@ qla2x00_start_scsi(srb_t *sp)
req->current_outstanding_cmd = handle;
req->outstanding_cmds[handle] = sp;
sp->handle = handle;
sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
cmd->host_scribble = (unsigned char *)(unsigned long)handle;
req->cnt -= req_cnt;
cmd_pkt = (cmd_entry_t *)req->ring_ptr;
@ -403,7 +405,7 @@ qla2x00_start_scsi(srb_t *sp)
/* Set target ID and LUN number*/
SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);
cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
/* Update tagged queuing modifier */
if (scsi_populate_tag_msg(cmd, tag)) {
@ -608,7 +610,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
struct dsd_dma *dsd_ptr;
struct ct6_dsd *ctx;
cmd = sp->cmd;
cmd = GET_CMD_SP(sp);
/* Update entry type to indicate Command Type 3 IOCB */
*((uint32_t *)(&cmd_pkt->entry_type)) =
@ -635,7 +637,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
}
cur_seg = scsi_sglist(cmd);
ctx = sp->ctx;
ctx = GET_CMD_CTX_SP(sp);
while (tot_dsds) {
avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
@ -724,7 +726,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
int i;
struct req_que *req;
cmd = sp->cmd;
cmd = GET_CMD_SP(sp);
/* Update entry type to indicate Command Type 3 IOCB */
*((uint32_t *)(&cmd_pkt->entry_type)) =
@ -744,12 +746,12 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_WRITE_DATA);
sp->fcport->vha->hw->qla_stats.output_bytes +=
scsi_bufflen(sp->cmd);
scsi_bufflen(cmd);
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_READ_DATA);
sp->fcport->vha->hw->qla_stats.input_bytes +=
scsi_bufflen(sp->cmd);
scsi_bufflen(cmd);
}
/* One DSD is available in the Command Type 3 IOCB */
@ -796,7 +798,7 @@ static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
unsigned int protcnt)
{
struct scsi_cmnd *cmd = sp->cmd;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
switch (scsi_get_prot_type(cmd)) {
@ -951,16 +953,16 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
struct qla2_sgx sgx;
dma_addr_t sle_dma;
uint32_t sle_dma_len, tot_prot_dma_len = 0;
struct scsi_cmnd *cmd = sp->cmd;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
prot_int = cmd->device->sector_size;
memset(&sgx, 0, sizeof(struct qla2_sgx));
sgx.tot_bytes = scsi_bufflen(sp->cmd);
sgx.cur_sg = scsi_sglist(sp->cmd);
sgx.tot_bytes = scsi_bufflen(cmd);
sgx.cur_sg = scsi_sglist(cmd);
sgx.sp = sp;
sg_prot = scsi_prot_sglist(sp->cmd);
sg_prot = scsi_prot_sglist(cmd);
while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
@ -994,7 +996,7 @@ alloc_and_fill:
}
list_add_tail(&dsd_ptr->list,
&((struct crc_context *)sp->ctx)->dsd_list);
&((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
sp->flags |= SRB_CRC_CTX_DSD_VALID;
@ -1043,11 +1045,12 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
uint32_t *cur_dsd = dsd;
int i;
uint16_t used_dsds = tot_dsds;
scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host);
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
uint8_t *cp;
scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
scsi_for_each_sg(cmd, sg, tot_dsds, i) {
dma_addr_t sle_dma;
/* Allocate additional continuation packets? */
@ -1077,7 +1080,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
}
list_add_tail(&dsd_ptr->list,
&((struct crc_context *)sp->ctx)->dsd_list);
&((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
sp->flags |= SRB_CRC_CTX_DSD_VALID;
@ -1090,17 +1093,16 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
sle_dma = sg_dma_address(sg);
ql_dbg(ql_dbg_io, vha, 0x300a,
"sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
sp->cmd);
i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg), cmd);
*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
avail_dsds--;
if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
cp = page_address(sg_page(sg)) + sg->offset;
ql_dbg(ql_dbg_io, vha, 0x300b,
"User data buffer=%p for cmd=%p.\n", cp, sp->cmd);
"User data buffer=%p for cmd=%p.\n", cp, cmd);
}
}
/* Null termination */
@ -1127,8 +1129,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
uint8_t *cp;
cmd = sp->cmd;
cmd = GET_CMD_SP(sp);
scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
dma_addr_t sle_dma;
@ -1159,7 +1160,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
}
list_add_tail(&dsd_ptr->list,
&((struct crc_context *)sp->ctx)->dsd_list);
&((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
sp->flags |= SRB_CRC_CTX_DSD_VALID;
@ -1170,7 +1171,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
cur_dsd = (uint32_t *)next_dsd;
}
sle_dma = sg_dma_address(sg);
if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
ql_dbg(ql_dbg_io, vha, 0x3027,
"%s(): %p, sg_entry %d - "
"addr=0x%x0x%x, len=%d.\n",
@ -1181,7 +1182,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
cp = page_address(sg_page(sg)) + sg->offset;
ql_dbg(ql_dbg_io, vha, 0x3028,
"%s(): Protection Data buffer = %p.\n", __func__,
@ -1227,7 +1228,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
dma_addr_t crc_ctx_dma;
char tag[2];
cmd = sp->cmd;
cmd = GET_CMD_SP(sp);
sgc = 0;
/* Update entry type to indicate Command Type CRC_2 IOCB */
@ -1255,15 +1256,15 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
__constant_cpu_to_le16(CF_READ_DATA);
}
if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) ||
(scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) ||
(scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) ||
(scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT))
if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
(scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
(scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
(scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
bundling = 0;
/* Allocate CRC context from global pool */
crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
GFP_ATOMIC, &crc_ctx_dma);
crc_ctx_pkt = sp->u.scmd.ctx =
dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
if (!crc_ctx_pkt)
goto crc_queuing_error;
@ -1309,7 +1310,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
fcp_cmnd->additional_cdb_len |= 2;
int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
@ -1344,7 +1345,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
blk_size = cmd->device->sector_size;
dif_bytes = (data_bytes / blk_size) * 8;
switch (scsi_get_prot_op(sp->cmd)) {
switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
case SCSI_PROT_READ_INSERT:
case SCSI_PROT_WRITE_STRIP:
total_bytes = data_bytes;
@ -1444,7 +1445,7 @@ qla24xx_start_scsi(srb_t *sp)
uint16_t tot_dsds;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
struct scsi_cmnd *cmd = sp->cmd;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct scsi_qla_host *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
char tag[2];
@ -1509,7 +1510,7 @@ qla24xx_start_scsi(srb_t *sp)
req->current_outstanding_cmd = handle;
req->outstanding_cmds[handle] = sp;
sp->handle = handle;
sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
cmd->host_scribble = (unsigned char *)(unsigned long)handle;
req->cnt -= req_cnt;
cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
@ -1528,7 +1529,7 @@ qla24xx_start_scsi(srb_t *sp)
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
cmd_pkt->vp_index = sp->fcport->vp_idx;
int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
@ -1610,7 +1611,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
uint16_t fw_prot_opts = 0;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
struct scsi_cmnd *cmd = sp->cmd;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct scsi_qla_host *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct cmd_type_crc_2 *cmd_pkt;
@ -1727,7 +1728,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
req->current_outstanding_cmd = handle;
req->outstanding_cmds[handle] = sp;
sp->handle = handle;
sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
cmd->host_scribble = (unsigned char *)(unsigned long)handle;
req->cnt -= req_cnt;
/* Fill-in common area */
@ -1743,7 +1744,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
/* Total Data and protection segment(s) */
@ -1796,7 +1797,7 @@ queuing_error:
static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
struct scsi_cmnd *cmd = sp->cmd;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct qla_hw_data *ha = sp->fcport->vha->hw;
int affinity = cmd->request->cpu;
@ -1817,7 +1818,6 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
uint32_t index, handle;
request_t *pkt;
uint16_t cnt, req_cnt;
struct srb_ctx *ctx;
pkt = NULL;
req_cnt = 1;
@ -1847,10 +1847,8 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
sp->handle = handle;
/* Adjust entry-counts as needed. */
if (sp->ctx) {
ctx = sp->ctx;
req_cnt = ctx->iocbs;
}
if (sp->type != SRB_SCSI_CMD)
req_cnt = sp->iocbs;
skip_cmd_array:
/* Check for room on request queue. */
@ -1888,8 +1886,7 @@ queuing_error:
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
struct srb_ctx *ctx = sp->ctx;
struct srb_iocb *lio = ctx->u.iocb_cmd;
struct srb_iocb *lio = &sp->u.iocb_cmd;
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
@ -1908,8 +1905,7 @@ static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
struct qla_hw_data *ha = sp->fcport->vha->hw;
struct srb_ctx *ctx = sp->ctx;
struct srb_iocb *lio = ctx->u.iocb_cmd;
struct srb_iocb *lio = &sp->u.iocb_cmd;
uint16_t opts;
mbx->entry_type = MBX_IOCB_TYPE;
@ -1998,8 +1994,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
struct fc_port *fcport = sp->fcport;
scsi_qla_host_t *vha = fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct srb_ctx *ctx = sp->ctx;
struct srb_iocb *iocb = ctx->u.iocb_cmd;
struct srb_iocb *iocb = &sp->u.iocb_cmd;
struct req_que *req = vha->req;
flags = iocb->u.tmf.flags;
@ -2026,7 +2021,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
struct fc_bsg_job *bsg_job = sp->u.bsg_job;
els_iocb->entry_type = ELS_IOCB_TYPE;
els_iocb->entry_count = 1;
@ -2040,7 +2035,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
els_iocb->opcode =
(((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
sp->type == SRB_ELS_CMD_RPT ?
bsg_job->request->rqst_data.r_els.els_code :
bsg_job->request->rqst_data.h_els.command_code;
els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
@ -2077,7 +2072,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
struct fc_bsg_job *bsg_job = sp->u.bsg_job;
int loop_iterartion = 0;
int cont_iocb_prsnt = 0;
int entry_count = 1;
@ -2154,7 +2149,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
struct fc_bsg_job *bsg_job = sp->u.bsg_job;
int loop_iterartion = 0;
int cont_iocb_prsnt = 0;
int entry_count = 1;
@ -2244,12 +2239,12 @@ qla82xx_start_scsi(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
char tag[2];
char tag[2];
/* Setup device pointers. */
ret = 0;
reg = &ha->iobase->isp82;
cmd = sp->cmd;
cmd = GET_CMD_SP(sp);
req = vha->req;
rsp = ha->rsp_q_map[0];
@ -2353,12 +2348,14 @@ sufficient_dsds:
if (req->cnt < (req_cnt + 2))
goto queuing_error;
ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
if (!sp->ctx) {
ctx = sp->u.scmd.ctx =
mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
if (!ctx) {
ql_log(ql_log_fatal, vha, 0x3010,
"Failed to allocate ctx for cmd=%p.\n", cmd);
goto queuing_error;
}
memset(ctx, 0, sizeof(struct ct6_dsd));
ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
GFP_ATOMIC, &ctx->fcp_cmnd_dma);
@ -2409,12 +2406,12 @@ sufficient_dsds:
if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
goto queuing_error_fcp_cmnd;
int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
/* build FCP_CMND IU */
memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
if (cmd->sc_data_direction == DMA_TO_DEVICE)
@ -2494,9 +2491,9 @@ sufficient_dsds:
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
cmd_pkt->vp_index = sp->fcport->vp_idx;
int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
sizeof(cmd_pkt->lun));
sizeof(cmd_pkt->lun));
/*
* Update tagged queuing modifier -- default is TSK_SIMPLE (0).
@ -2537,7 +2534,7 @@ sufficient_dsds:
req->current_outstanding_cmd = handle;
req->outstanding_cmds[handle] = sp;
sp->handle = handle;
sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
cmd->host_scribble = (unsigned char *)(unsigned long)handle;
req->cnt -= req_cnt;
wmb();
@ -2583,9 +2580,9 @@ queuing_error:
if (tot_dsds)
scsi_dma_unmap(cmd);
if (sp->ctx) {
mempool_free(sp->ctx, ha->ctx_mempool);
sp->ctx = NULL;
if (sp->u.scmd.ctx) {
mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
sp->u.scmd.ctx = NULL;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@ -2598,7 +2595,6 @@ qla2x00_start_sp(srb_t *sp)
int rval;
struct qla_hw_data *ha = sp->fcport->vha->hw;
void *pkt;
struct srb_ctx *ctx = sp->ctx;
unsigned long flags;
rval = QLA_FUNCTION_FAILED;
@ -2611,7 +2607,7 @@ qla2x00_start_sp(srb_t *sp)
}
rval = QLA_SUCCESS;
switch (ctx->type) {
switch (sp->type) {
case SRB_LOGIN_CMD:
IS_FWI2_CAPABLE(ha) ?
qla24xx_login_iocb(sp, pkt) :


@ -853,8 +853,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
req->outstanding_cmds[index] = NULL;
/* Save ISP completion status */
sp->cmd->result = DID_OK << 16;
qla2x00_sp_compl(ha, sp);
sp->done(ha, sp, DID_OK << 16);
} else {
ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
@ -911,7 +910,6 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
fc_port_t *fcport;
srb_t *sp;
struct srb_iocb *lio;
struct srb_ctx *ctx;
uint16_t *data;
uint16_t status;
@ -919,9 +917,8 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
if (!sp)
return;
ctx = sp->ctx;
lio = ctx->u.iocb_cmd;
type = ctx->name;
lio = &sp->u.iocb_cmd;
type = sp->name;
fcport = sp->fcport;
data = lio->u.logio.data;
@ -945,7 +942,7 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
}
status = le16_to_cpu(mbx->status);
if (status == 0x30 && ctx->type == SRB_LOGIN_CMD &&
if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
status = 0;
if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
@ -956,7 +953,7 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
le16_to_cpu(mbx->mb1));
data[0] = MBS_COMMAND_COMPLETE;
if (ctx->type == SRB_LOGIN_CMD) {
if (sp->type == SRB_LOGIN_CMD) {
fcport->port_type = FCT_TARGET;
if (le16_to_cpu(mbx->mb1) & BIT_0)
fcport->port_type = FCT_INITIATOR;
@ -987,7 +984,7 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
le16_to_cpu(mbx->mb7));
logio_done:
lio->done(sp);
sp->done(vha, sp, 0);
}
static void
@ -996,29 +993,18 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
{
const char func[] = "CT_IOCB";
const char *type;
struct qla_hw_data *ha = vha->hw;
srb_t *sp;
struct srb_ctx *sp_bsg;
struct fc_bsg_job *bsg_job;
uint16_t comp_status;
int res;
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (!sp)
return;
sp_bsg = sp->ctx;
bsg_job = sp_bsg->u.bsg_job;
bsg_job = sp->u.bsg_job;
type = NULL;
switch (sp_bsg->type) {
case SRB_CT_CMD:
type = "ct pass-through";
break;
default:
ql_log(ql_log_warn, vha, 0x5047,
"Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
return;
}
type = "ct pass-through";
comp_status = le16_to_cpu(pkt->comp_status);
@ -1030,7 +1016,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
if (comp_status != CS_COMPLETE) {
if (comp_status == CS_DATA_UNDERRUN) {
bsg_job->reply->result = DID_OK << 16;
res = DID_OK << 16;
bsg_job->reply->reply_payload_rcv_len =
le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
@ -1043,30 +1029,19 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
ql_log(ql_log_warn, vha, 0x5049,
"CT pass-through-%s error "
"comp_status-status=0x%x.\n", type, comp_status);
bsg_job->reply->result = DID_ERROR << 16;
res = DID_ERROR << 16;
bsg_job->reply->reply_payload_rcv_len = 0;
}
ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
(uint8_t *)pkt, sizeof(*pkt));
} else {
bsg_job->reply->result = DID_OK << 16;
res = DID_OK << 16;
bsg_job->reply->reply_payload_rcv_len =
bsg_job->reply_payload.payload_len;
bsg_job->reply_len = 0;
}
dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
if (sp_bsg->type == SRB_ELS_CMD_HST || sp_bsg->type == SRB_CT_CMD)
kfree(sp->fcport);
kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
bsg_job->job_done(bsg_job);
sp->done(vha, sp, res);
}
static void
@ -1075,22 +1050,20 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
{
const char func[] = "ELS_CT_IOCB";
const char *type;
struct qla_hw_data *ha = vha->hw;
srb_t *sp;
struct srb_ctx *sp_bsg;
struct fc_bsg_job *bsg_job;
uint16_t comp_status;
uint32_t fw_status[3];
uint8_t* fw_sts_ptr;
int res;
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (!sp)
return;
sp_bsg = sp->ctx;
bsg_job = sp_bsg->u.bsg_job;
bsg_job = sp->u.bsg_job;
type = NULL;
switch (sp_bsg->type) {
switch (sp->type) {
case SRB_ELS_CMD_RPT:
case SRB_ELS_CMD_HST:
type = "els";
@ -1100,7 +1073,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
break;
default:
ql_log(ql_log_warn, vha, 0x503e,
"Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
"Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
return;
}
@ -1116,9 +1089,9 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
if (comp_status != CS_COMPLETE) {
if (comp_status == CS_DATA_UNDERRUN) {
bsg_job->reply->result = DID_OK << 16;
res = DID_OK << 16;
bsg_job->reply->reply_payload_rcv_len =
le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
ql_log(ql_log_info, vha, 0x503f,
"ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
@ -1138,7 +1111,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
pkt)->error_subcode_1),
le16_to_cpu(((struct els_sts_entry_24xx *)
pkt)->error_subcode_2));
bsg_job->reply->result = DID_ERROR << 16;
res = DID_ERROR << 16;
bsg_job->reply->reply_payload_rcv_len = 0;
fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
@ -1147,23 +1120,12 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
(uint8_t *)pkt, sizeof(*pkt));
}
else {
bsg_job->reply->result = DID_OK << 16;
res = DID_OK << 16;
bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
bsg_job->reply_len = 0;
}
dma_unmap_sg(&ha->pdev->dev,
bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
dma_unmap_sg(&ha->pdev->dev,
bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
if ((sp_bsg->type == SRB_ELS_CMD_HST) ||
(sp_bsg->type == SRB_CT_CMD))
kfree(sp->fcport);
kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
bsg_job->job_done(bsg_job);
sp->done(vha, sp, res);
}
static void
@ -1175,7 +1137,6 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
fc_port_t *fcport;
srb_t *sp;
struct srb_iocb *lio;
struct srb_ctx *ctx;
uint16_t *data;
uint32_t iop[2];
@ -1183,9 +1144,8 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
if (!sp)
return;
ctx = sp->ctx;
lio = ctx->u.iocb_cmd;
type = ctx->name;
lio = &sp->u.iocb_cmd;
type = sp->name;
fcport = sp->fcport;
data = lio->u.logio.data;
@ -1213,7 +1173,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
le32_to_cpu(logio->io_parameter[0]));
data[0] = MBS_COMMAND_COMPLETE;
if (ctx->type != SRB_LOGIN_CMD)
if (sp->type != SRB_LOGIN_CMD)
goto logio_done;
iop[0] = le32_to_cpu(logio->io_parameter[0]);
@ -1256,7 +1216,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
le32_to_cpu(logio->io_parameter[1]));
logio_done:
lio->done(sp);
sp->done(vha, sp, 0);
}
static void
@ -1268,7 +1228,6 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
fc_port_t *fcport;
srb_t *sp;
struct srb_iocb *iocb;
struct srb_ctx *ctx;
struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
int error = 1;
@ -1276,9 +1235,8 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
if (!sp)
return;
ctx = sp->ctx;
iocb = ctx->u.iocb_cmd;
type = ctx->name;
iocb = &sp->u.iocb_cmd;
type = sp->name;
fcport = sp->fcport;
if (sts->entry_status) {
@ -1312,7 +1270,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
(uint8_t *)sts, sizeof(*sts));
}
iocb->done(sp);
sp->done(vha, sp, 0);
}
/**
@ -1398,25 +1356,32 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
uint32_t sense_len, struct rsp_que *rsp)
uint32_t sense_len, struct rsp_que *rsp, int res)
{
struct scsi_qla_host *vha = sp->fcport->vha;
struct scsi_cmnd *cp = sp->cmd;
struct scsi_cmnd *cp = GET_CMD_SP(sp);
uint32_t track_sense_len;
if (sense_len >= SCSI_SENSE_BUFFERSIZE)
sense_len = SCSI_SENSE_BUFFERSIZE;
sp->request_sense_length = sense_len;
sp->request_sense_ptr = cp->sense_buffer;
if (sp->request_sense_length > par_sense_len)
SET_CMD_SENSE_LEN(sp, sense_len);
SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
track_sense_len = sense_len;
if (sense_len > par_sense_len)
sense_len = par_sense_len;
memcpy(cp->sense_buffer, sense_data, sense_len);
sp->request_sense_ptr += sense_len;
sp->request_sense_length -= sense_len;
if (sp->request_sense_length != 0)
SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
track_sense_len -= sense_len;
SET_CMD_SENSE_LEN(sp, track_sense_len);
if (track_sense_len != 0) {
rsp->status_srb = sp;
cp->result = res;
}
if (sense_len) {
ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
@ -1444,7 +1409,7 @@ static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
struct scsi_qla_host *vha = sp->fcport->vha;
struct scsi_cmnd *cmd = sp->cmd;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
uint8_t *ap = &sts24->data[12];
uint8_t *ep = &sts24->data[20];
uint32_t e_ref_tag, a_ref_tag;
@ -1588,6 +1553,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
uint16_t que;
struct req_que *req;
int logit = 1;
int res = 0;
sts = (sts_entry_t *) pkt;
sts24 = (struct sts_entry_24xx *) pkt;
@ -1627,7 +1593,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
qla2xxx_wake_dpc(vha);
return;
}
cp = sp->cmd;
cp = GET_CMD_SP(sp);
if (cp == NULL) {
ql_dbg(ql_dbg_io, vha, 0x3018,
"Command already returned (0x%x/%p).\n",
@ -1680,7 +1646,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
"FCP I/O protocol failure (0x%x/0x%x).\n",
rsp_info_len, rsp_info[3]);
cp->result = DID_BUS_BUSY << 16;
res = DID_BUS_BUSY << 16;
goto out;
}
}
@ -1697,7 +1663,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
case CS_COMPLETE:
case CS_QUEUE_FULL:
if (scsi_status == 0) {
cp->result = DID_OK << 16;
res = DID_OK << 16;
break;
}
if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
@ -1712,11 +1678,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
"detected (0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
cp->result = DID_ERROR << 16;
res = DID_ERROR << 16;
break;
}
}
cp->result = DID_OK << 16 | lscsi_status;
res = DID_OK << 16 | lscsi_status;
if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
ql_dbg(ql_dbg_io, vha, 0x301b,
@ -1732,7 +1698,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
break;
qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
rsp);
rsp, res);
break;
case CS_DATA_UNDERRUN:
@ -1746,7 +1712,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
"(0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
cp->result = DID_ERROR << 16 | lscsi_status;
res = DID_ERROR << 16 | lscsi_status;
goto check_scsi_status;
}
@ -1758,7 +1724,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
"detected (0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
cp->result = DID_ERROR << 16;
res = DID_ERROR << 16;
break;
}
} else {
@ -1766,11 +1732,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
"Dropped frame(s) detected (0x%x "
"of 0x%x bytes).\n", resid, scsi_bufflen(cp));
cp->result = DID_ERROR << 16 | lscsi_status;
res = DID_ERROR << 16 | lscsi_status;
goto check_scsi_status;
}
cp->result = DID_OK << 16 | lscsi_status;
res = DID_OK << 16 | lscsi_status;
logit = 0;
check_scsi_status:
@ -1793,7 +1759,7 @@ check_scsi_status:
break;
qla2x00_handle_sense(sp, sense_data, par_sense_len,
sense_len, rsp);
sense_len, rsp, res);
}
break;
@ -1810,7 +1776,7 @@ check_scsi_status:
* while we try to recover so instruct the mid layer
* to requeue until the class decides how to handle this.
*/
cp->result = DID_TRANSPORT_DISRUPTED << 16;
res = DID_TRANSPORT_DISRUPTED << 16;
if (comp_status == CS_TIMEOUT) {
if (IS_FWI2_CAPABLE(ha))
@ -1829,14 +1795,14 @@ check_scsi_status:
break;
case CS_ABORTED:
cp->result = DID_RESET << 16;
res = DID_RESET << 16;
break;
case CS_DIF_ERROR:
logit = qla2x00_handle_dif_error(sp, sts24);
break;
default:
cp->result = DID_ERROR << 16;
res = DID_ERROR << 16;
break;
}
@ -1847,7 +1813,7 @@ out:
"nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
"cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
"rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
comp_status, scsi_status, cp->result, vha->host_no,
comp_status, scsi_status, res, vha->host_no,
cp->device->id, cp->device->lun, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
@ -1856,7 +1822,7 @@ out:
resid_len, fw_resid_len);
if (rsp->status_srb == NULL)
qla2x00_sp_compl(ha, sp);
sp->done(ha, sp, res);
}
/**
@ -1869,84 +1835,52 @@ out:
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
uint8_t sense_sz = 0;
uint8_t sense_sz = 0;
struct qla_hw_data *ha = rsp->hw;
struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
srb_t *sp = rsp->status_srb;
srb_t *sp = rsp->status_srb;
struct scsi_cmnd *cp;
uint32_t sense_len;
uint8_t *sense_ptr;
if (sp != NULL && sp->request_sense_length != 0) {
cp = sp->cmd;
if (cp == NULL) {
ql_log(ql_log_warn, vha, 0x3025,
"cmd is NULL: already returned to OS (sp=%p).\n",
sp);
if (!sp || !GET_CMD_SENSE_LEN(sp))
return;
rsp->status_srb = NULL;
return;
}
sense_len = GET_CMD_SENSE_LEN(sp);
sense_ptr = GET_CMD_SENSE_PTR(sp);
if (sp->request_sense_length > sizeof(pkt->data)) {
sense_sz = sizeof(pkt->data);
} else {
sense_sz = sp->request_sense_length;
}
cp = GET_CMD_SP(sp);
if (cp == NULL) {
ql_log(ql_log_warn, vha, 0x3025,
"cmd is NULL: already returned to OS (sp=%p).\n", sp);
/* Move sense data. */
if (IS_FWI2_CAPABLE(ha))
host_to_fcp_swap(pkt->data, sizeof(pkt->data));
memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
sp->request_sense_ptr, sense_sz);
sp->request_sense_ptr += sense_sz;
sp->request_sense_length -= sense_sz;
/* Place command on done queue. */
if (sp->request_sense_length == 0) {
rsp->status_srb = NULL;
qla2x00_sp_compl(ha, sp);
}
rsp->status_srb = NULL;
return;
}
}
static int
qla2x00_free_sp_ctx(scsi_qla_host_t *vha, srb_t *sp)
{
struct qla_hw_data *ha = vha->hw;
struct srb_ctx *ctx;
if (sense_len > sizeof(pkt->data))
sense_sz = sizeof(pkt->data);
else
sense_sz = sense_len;
if (!sp->ctx)
return 1;
/* Move sense data. */
if (IS_FWI2_CAPABLE(ha))
host_to_fcp_swap(pkt->data, sizeof(pkt->data));
memcpy(sense_ptr, pkt->data, sense_sz);
ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
sense_ptr, sense_sz);
ctx = sp->ctx;
sense_len -= sense_sz;
sense_ptr += sense_sz;
if (ctx->type == SRB_LOGIN_CMD ||
ctx->type == SRB_LOGOUT_CMD ||
ctx->type == SRB_TM_CMD) {
ctx->u.iocb_cmd->done(sp);
return 0;
} else if (ctx->type == SRB_ADISC_CMD) {
ctx->u.iocb_cmd->free(sp);
return 0;
} else {
struct fc_bsg_job *bsg_job;
SET_CMD_SENSE_PTR(sp, sense_ptr);
SET_CMD_SENSE_LEN(sp, sense_len);
bsg_job = ctx->u.bsg_job;
if (ctx->type == SRB_ELS_CMD_HST ||
ctx->type == SRB_CT_CMD)
kfree(sp->fcport);
bsg_job->reply->reply_data.ctels_reply.status =
FC_CTELS_STATUS_OK;
bsg_job->reply->result = DID_ERROR << 16;
bsg_job->reply->reply_payload_rcv_len = 0;
kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
bsg_job->job_done(bsg_job);
return 0;
/* Place command on done queue. */
if (sense_len == 0) {
rsp->status_srb = NULL;
sp->done(ha, sp, cp->result);
}
return 1;
}
/**
@ -1962,43 +1896,18 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
const char func[] = "ERROR-IOCB";
uint16_t que = MSW(pkt->handle);
struct req_que *req = ha->req_q_map[que];
int res = DID_ERROR << 16;
if (pkt->entry_status & RF_INV_E_ORDER)
ql_dbg(ql_dbg_async, vha, 0x502a,
"Invalid Entry Order.\n");
else if (pkt->entry_status & RF_INV_E_COUNT)
ql_dbg(ql_dbg_async, vha, 0x502b,
"Invalid Entry Count.\n");
else if (pkt->entry_status & RF_INV_E_PARAM)
ql_dbg(ql_dbg_async, vha, 0x502c,
"Invalid Entry Parameter.\n");
else if (pkt->entry_status & RF_INV_E_TYPE)
ql_dbg(ql_dbg_async, vha, 0x502d,
"Invalid Entry Type.\n");
else if (pkt->entry_status & RF_BUSY)
ql_dbg(ql_dbg_async, vha, 0x502e,
"Busy.\n");
else
ql_dbg(ql_dbg_async, vha, 0x502f,
"UNKNOWN flag error.\n");
ql_dbg(ql_dbg_async, vha, 0x502a,
"type of error status in response: 0x%x\n", pkt->entry_status);
if (pkt->entry_status & RF_BUSY)
res = DID_BUS_BUSY << 16;
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
if (qla2x00_free_sp_ctx(vha, sp)) {
if (pkt->entry_status &
(RF_INV_E_ORDER | RF_INV_E_COUNT |
RF_INV_E_PARAM | RF_INV_E_TYPE)) {
sp->cmd->result = DID_ERROR << 16;
} else if (pkt->entry_status & RF_BUSY) {
sp->cmd->result = DID_BUS_BUSY << 16;
} else {
sp->cmd->result = DID_ERROR << 16;
}
qla2x00_sp_compl(ha, sp);
}
} else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
|| pkt->entry_type == COMMAND_TYPE_6) {
if (sp)
sp->done(ha, sp, res);
else {
ql_log(ql_log_warn, vha, 0x5030,
"Error entry - invalid handle.\n");


@ -874,6 +874,7 @@ qla2x00_abort_command(srb_t *sp)
scsi_qla_host_t *vha = fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = vha->req;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__);
@ -896,7 +897,7 @@ qla2x00_abort_command(srb_t *sp)
mcp->mb[1] = fcport->loop_id << 8;
mcp->mb[2] = (uint16_t)handle;
mcp->mb[3] = (uint16_t)(handle >> 16);
mcp->mb[6] = (uint16_t)sp->cmd->device->lun;
mcp->mb[6] = (uint16_t)cmd->device->lun;
mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_0;
mcp->tov = MBX_TOV_SECONDS;

View File

@ -3608,7 +3608,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
if (!sp->ctx ||
if (!sp->u.scmd.ctx ||
(sp->flags & SRB_FCP_CMND_DMA_VALID)) {
spin_unlock_irqrestore(
&ha->hardware_lock, flags);

drivers/scsi/qla2xxx/qla_os.c

@ -304,7 +304,6 @@ static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
struct req_que **, struct rsp_que **);
static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);
static void qla2x00_sp_free_dma(srb_t *);
/* -------------------------------------------------------------------------- */
static int qla2x00_alloc_queues(struct qla_hw_data *ha)
@ -559,28 +558,75 @@ qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
return str;
}
static inline srb_t *
qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct scsi_cmnd *cmd)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x3006,
		    "Memory allocation failed for sp.\n");
		return sp;
	}
	atomic_set(&sp->ref_count, 1);
	sp->fcport = fcport;
	sp->cmd = cmd;
	sp->flags = 0;
	CMD_SP(cmd) = (void *)sp;
	sp->ctx = NULL;

	return sp;
}

void
qla2x00_sp_free_dma(void *vha, void *ptr)
{
	srb_t *sp = (srb_t *)ptr;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	void *ctx = GET_CMD_CTX_SP(sp);

	if (sp->flags & SRB_DMA_VALID) {
		scsi_dma_unmap(cmd);
		sp->flags &= ~SRB_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
		/* List assured to be having elements */
		qla2x00_clean_dsd_pool(ha, sp);
		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
		dma_pool_free(ha->dl_dma_pool, ctx,
		    ((struct crc_context *)ctx)->crc_ctx_dma);
		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
	}

	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
		struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
		    ctx1->fcp_cmnd_dma);
		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
		mempool_free(ctx1, ha->ctx_mempool);
		ctx1 = NULL;
	}

	CMD_SP(cmd) = NULL;
	mempool_free(sp, ha->srb_mempool);
}
static void
qla2x00_sp_compl(void *data, void *ptr, int res)
{
struct qla_hw_data *ha = (struct qla_hw_data *)data;
srb_t *sp = (srb_t *)ptr;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
cmd->result = res;
if (atomic_read(&sp->ref_count) == 0) {
ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
"SP reference-count to ZERO -- sp=%p cmd=%p.\n",
sp, GET_CMD_SP(sp));
if (ql2xextended_error_logging & ql_dbg_io)
BUG();
return;
}
if (!atomic_dec_and_test(&sp->ref_count))
return;
qla2x00_sp_free_dma(ha, sp);
cmd->scsi_done(cmd);
}
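qla2x00_sp_compl() above is the consolidated completion: it stores the result, drops a reference, and only the final reference performs qla2x00_sp_free_dma() plus scsi_done(). The userspace model below captures that last-reference-frees rule using C11 atomics; the names and the extra "abort path" reference are illustrative assumptions, not driver behavior quoted verbatim.

/*
 * Userspace model of reference-counted SRB completion: whoever drops the
 * last reference releases the resources and completes the command.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct srb_model {
	atomic_int ref_count;
	int result;
};

static void sp_free_dma(struct srb_model *sp)
{
	printf("freeing SRB resources (result=0x%x)\n", sp->result);
	free(sp);
}

static void sp_compl(struct srb_model *sp, int res)
{
	sp->result = res;
	if (atomic_fetch_sub(&sp->ref_count, 1) != 1)
		return;			/* another path still owns a reference */
	sp_free_dma(sp);		/* last reference: release and complete */
	printf("scsi_done() would run here\n");
}

int main(void)
{
	struct srb_model *sp = malloc(sizeof(*sp));

	if (!sp)
		return 1;
	atomic_init(&sp->ref_count, 1);		/* queuecommand takes the initial reference */
	atomic_fetch_add(&sp->ref_count, 1);	/* e.g. an abort path pins the SRB */

	sp_compl(sp, 0);	/* normal completion: refcount 2 -> 1, nothing freed yet */
	sp_compl(sp, 0);	/* abort path drops its reference: 1 -> 0, SRB released */
	return 0;
}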
static int
@ -644,10 +690,17 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto qc24_target_busy;
}
sp = qla2x00_get_new_sp(base_vha, fcport, cmd);
sp = qla2x00_get_sp(base_vha, fcport, GFP_ATOMIC);
if (!sp)
goto qc24_host_busy;
sp->u.scmd.cmd = cmd;
sp->type = SRB_SCSI_CMD;
atomic_set(&sp->ref_count, 1);
CMD_SP(cmd) = (void *)sp;
sp->free = qla2x00_sp_free_dma;
sp->done = qla2x00_sp_compl;
rval = ha->isp_ops->start_scsi(sp);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_io, vha, 0x3013,
@ -658,8 +711,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
return 0;
qc24_host_busy_free_sp:
qla2x00_sp_free_dma(sp);
mempool_free(sp, ha->srb_mempool);
qla2x00_sp_free_dma(ha, sp);
qc24_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
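In the queuecommand hunk above, the SRB is now self-describing: the submitter fills sp->u.scmd.cmd, sp->type, the reference count, and the free/done callbacks, so both the error unwind at qc24_host_busy_free_sp and the interrupt path work through the same pointers. A simplified standalone model of that wiring is sketched below; the types, the fake start_scsi, and the direct sp->done() call at the end are stand-ins for what the firmware interrupt path would do.

/*
 * Model of the callback wiring set up at submission time.
 */
#include <stdio.h>
#include <stdlib.h>

struct srb_model;
typedef void (*srb_free_fn)(struct srb_model *);
typedef void (*srb_done_fn)(struct srb_model *, int res);

struct srb_model {
	int type;			/* e.g. SRB_SCSI_CMD */
	srb_free_fn free;
	srb_done_fn done;
};

static void scsi_sp_free(struct srb_model *sp)		{ free(sp); }
static void scsi_sp_done(struct srb_model *sp, int r)	{ printf("done res=0x%x\n", r); sp->free(sp); }

static int start_scsi_stub(struct srb_model *sp)	{ (void)sp; return 0; /* pretend success */ }

static int queuecommand_model(void)
{
	struct srb_model *sp = calloc(1, sizeof(*sp));

	if (!sp)
		return -1;			/* host busy */
	sp->type = 1;				/* SRB_SCSI_CMD stand-in */
	sp->free = scsi_sp_free;		/* mirrors sp->free = qla2x00_sp_free_dma */
	sp->done = scsi_sp_done;		/* mirrors sp->done = qla2x00_sp_compl */

	if (start_scsi_stub(sp) != 0) {
		sp->free(sp);			/* firmware refused: release the SRB */
		return -1;
	}
	sp->done(sp, 0);			/* interrupt path would normally do this */
	return 0;
}

int main(void)
{
	return queuecommand_model();
}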
@ -893,7 +945,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
}
spin_lock_irqsave(&ha->hardware_lock, flags);
qla2x00_sp_compl(ha, sp);
sp->done(ha, sp, 0);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Did the command return during mailbox execution? */
@ -925,6 +977,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
srb_t *sp;
struct scsi_cmnd *cmd;
status = QLA_SUCCESS;
@ -935,28 +988,29 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
sp = req->outstanding_cmds[cnt];
if (!sp)
continue;
if ((sp->ctx) && !IS_PROT_IO(sp))
if (sp->type != SRB_SCSI_CMD)
continue;
if (vha->vp_idx != sp->fcport->vha->vp_idx)
continue;
match = 0;
cmd = GET_CMD_SP(sp);
switch (type) {
case WAIT_HOST:
match = 1;
break;
case WAIT_TARGET:
match = sp->cmd->device->id == t;
match = cmd->device->id == t;
break;
case WAIT_LUN:
match = (sp->cmd->device->id == t &&
sp->cmd->device->lun == l);
match = (cmd->device->id == t &&
cmd->device->lun == l);
break;
}
if (!match)
continue;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
status = qla2x00_eh_wait_on_command(sp->cmd);
status = qla2x00_eh_wait_on_command(cmd);
spin_lock_irqsave(&ha->hardware_lock, flags);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@ -1249,7 +1303,6 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
int que, cnt;
unsigned long flags;
srb_t *sp;
struct srb_ctx *ctx;
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
@ -1262,31 +1315,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
sp = req->outstanding_cmds[cnt];
if (sp) {
req->outstanding_cmds[cnt] = NULL;
if (!sp->ctx ||
(sp->flags & SRB_FCP_CMND_DMA_VALID) ||
IS_PROT_IO(sp)) {
sp->cmd->result = res;
qla2x00_sp_compl(ha, sp);
} else {
ctx = sp->ctx;
if (ctx->type == SRB_ELS_CMD_RPT ||
ctx->type == SRB_ELS_CMD_HST ||
ctx->type == SRB_CT_CMD) {
struct fc_bsg_job *bsg_job =
ctx->u.bsg_job;
if (bsg_job->request->msgcode
== FC_BSG_HST_CT)
kfree(sp->fcport);
bsg_job->req->errors = 0;
bsg_job->reply->result = res;
bsg_job->job_done(bsg_job);
kfree(sp->ctx);
mempool_free(sp,
ha->srb_mempool);
} else {
ctx->u.iocb_cmd->free(sp);
}
}
sp->done(vha, sp, res);
}
}
}
@ -3820,75 +3849,6 @@ qla2x00_rst_aen(scsi_qla_host_t *vha)
}
}
static void
qla2x00_sp_free_dma(srb_t *sp)
{
struct scsi_cmnd *cmd = sp->cmd;
struct qla_hw_data *ha = sp->fcport->vha->hw;
if (sp->flags & SRB_DMA_VALID) {
scsi_dma_unmap(cmd);
sp->flags &= ~SRB_DMA_VALID;
}
if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
scsi_prot_sg_count(cmd), cmd->sc_data_direction);
sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
}
if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
/* List assured to be having elements */
qla2x00_clean_dsd_pool(ha, sp);
sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
}
if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
dma_pool_free(ha->dl_dma_pool, sp->ctx,
((struct crc_context *)sp->ctx)->crc_ctx_dma);
sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
}
if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
struct ct6_dsd *ctx = sp->ctx;
dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
ctx->fcp_cmnd_dma);
list_splice(&ctx->dsd_list, &ha->gbl_dsd_list);
ha->gbl_dsd_inuse -= ctx->dsd_use_cnt;
ha->gbl_dsd_avail += ctx->dsd_use_cnt;
mempool_free(sp->ctx, ha->ctx_mempool);
sp->ctx = NULL;
}
CMD_SP(cmd) = NULL;
}
static void
qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
{
struct scsi_cmnd *cmd = sp->cmd;
qla2x00_sp_free_dma(sp);
mempool_free(sp, ha->srb_mempool);
cmd->scsi_done(cmd);
}
void
qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
{
if (atomic_read(&sp->ref_count) == 0) {
ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
"SP reference-count to ZERO -- sp=%p cmd=%p.\n",
sp, sp->cmd);
if (ql2xextended_error_logging & ql_dbg_io)
BUG();
return;
}
if (!atomic_dec_and_test(&sp->ref_count))
return;
qla2x00_sp_final_compl(ha, sp);
}
/**************************************************************************
* qla2x00_timer
*
@ -3959,7 +3919,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
sp = req->outstanding_cmds[index];
if (!sp)
continue;
if (sp->ctx && !IS_PROT_IO(sp))
if (sp->type != SRB_SCSI_CMD)
continue;
sfcp = sp->fcport;
if (!(sfcp->flags & FCF_FCP2_DEVICE))