SCSI misc on 20230909

Mostly small stragglers that missed the initial merge. Driver updates
are qla2xxx and smartpqi (mpt3sas has a high diffstat due to the
volatile qualifier removal, fnic due to unused function removal and
sd.c has a lot of code shuffling to remove forward declarations).

Signed-off-by: James E.J. Bottomley <jejb@linux.ibm.com>

-----BEGIN PGP SIGNATURE-----

iJwEABMIAEQWIQTnYEDbdso9F2cI+arnQslM7pishQUCZPyD4CYcamFtZXMuYm90
dG9tbGV5QGhhbnNlbnBhcnRuZXJzaGlwLmNvbQAKCRDnQslM7pishZWGAQDlh/3q
5YJp7f8sIqmgdOiKl3bln3API9Y0MPsC3z5TsAEAv9LYQZH3He4XvxMy/v5FioEs
8IWIoBsUZtcgoK6mI4w=
=8Aj8
-----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull more SCSI updates from James Bottomley:
 "Mostly small stragglers that missed the initial merge. Driver updates
  are qla2xxx and smartpqi (mpt3sas has a high diffstat due to the
  volatile qualifier removal, fnic due to unused function removal and
  sd.c has a lot of code shuffling to remove forward declarations)"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (38 commits)
  scsi: ufs: core: No need to update UPIU.header.flags and lun in advanced RPMB handler
  scsi: ufs: core: Add advanced RPMB support where UFSHCI 4.0 does not support EHS length in UTRD
  scsi: mpt3sas: Remove volatile qualifier
  scsi: mpt3sas: Perform additional retries if doorbell read returns 0
  scsi: libsas: Simplify sas_queue_reset() and remove unused code
  scsi: ufs: Fix the build for the old ARM OABI
  scsi: qla2xxx: Fix unused variable warning in qla2xxx_process_purls_pkt()
  scsi: fnic: Remove unused functions fnic_scsi_host_start/end_tag()
  scsi: qla2xxx: Fix spelling mistake "tranport" -> "transport"
  scsi: fnic: Replace sgreset tag with max_tag_id
  scsi: qla2xxx: Remove unused variables in qla24xx_build_scsi_type_6_iocbs()
  scsi: qla2xxx: Fix nvme_fc_rcv_ls_req() undefined error
  scsi: smartpqi: Change driver version to 2.1.24-046
  scsi: smartpqi: Enhance error messages
  scsi: smartpqi: Enhance controller offline notification
  scsi: smartpqi: Enhance shutdown notification
  scsi: smartpqi: Simplify lun_number assignment
  scsi: smartpqi: Rename pciinfo to pci_info
  scsi: smartpqi: Rename MACRO to clarify purpose
  scsi: smartpqi: Add abort handler
  ...
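The mpt3sas churn called out above is largely mechanical: the volatile qualifier comes off the register-block typedef and the read helpers. A minimal user-space sketch (illustrative names, not driver code) of why the qualifier is redundant there — the MMIO accessor itself performs the volatile access, so the pointed-to type does not need it:

#include <stdint.h>

/* Sketch only: readl()-style accessors cast through a volatile pointer
 * internally, so a register struct that is only ever reached through
 * such accessors does not itself need to be declared volatile. */
static inline uint32_t mmio_read32(const void *addr)
{
	return *(const volatile uint32_t *)addr;
}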
This commit is contained in:
commit 2a5a4326e5
@@ -4752,7 +4752,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
 		}
 
 		spin_unlock_irqrestore(ap->lock, flags);
-		scsi_rescan_device(&(sdev->sdev_gendev));
+		scsi_rescan_device(sdev);
 		scsi_device_put(sdev);
 		spin_lock_irqsave(ap->lock, flags);
 	}
@@ -1451,7 +1451,7 @@ retry_next:
 #endif
 			break;
 		}
-		scsi_rescan_device(&device->sdev_gendev);
+		scsi_rescan_device(device);
 		break;
 
 	default:
@@ -27,7 +27,7 @@
 
 #define DRV_NAME		"fnic"
 #define DRV_DESCRIPTION		"Cisco FCoE HBA Driver"
-#define DRV_VERSION		"1.6.0.55"
+#define DRV_VERSION		"1.6.0.56"
 #define PFX			DRV_NAME ": "
 #define DFX			DRV_NAME "%d: "
 
@@ -236,6 +236,7 @@ struct fnic {
 	unsigned int wq_count;
 	unsigned int cq_count;
 
+	struct mutex sgreset_mutex;
 	struct dentry *fnic_stats_debugfs_host;
 	struct dentry *fnic_stats_debugfs_file;
 	struct dentry *fnic_reset_debugfs_file;
@@ -2167,39 +2167,6 @@ clean_pending_aborts_end:
 	return ret;
 }
 
-/*
- * fnic_scsi_host_start_tag
- * Allocates tagid from host's tag list
- **/
-static inline int
-fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
-{
-	struct request *rq = scsi_cmd_to_rq(sc);
-	struct request_queue *q = rq->q;
-	struct request *dummy;
-
-	dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
-	if (IS_ERR(dummy))
-		return SCSI_NO_TAG;
-
-	rq->tag = dummy->tag;
-	sc->host_scribble = (unsigned char *)dummy;
-
-	return dummy->tag;
-}
-
-/*
- * fnic_scsi_host_end_tag
- * frees tag allocated by fnic_scsi_host_start_tag.
- **/
-static inline void
-fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
-{
-	struct request *dummy = (struct request *)sc->host_scribble;
-
-	blk_mq_free_request(dummy);
-}
-
 /*
  * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
  * fail to get aborted. It calls driver's eh_device_reset with a SCSI command

@@ -2222,7 +2189,6 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 	struct reset_stats *reset_stats;
 	int tag = rq->tag;
 	DECLARE_COMPLETION_ONSTACK(tm_done);
-	int tag_gen_flag = 0;   /*to track tags allocated by fnic driver*/
 	bool new_sc = 0;
 
 	/* Wait for rport to unblock */

@@ -2252,17 +2218,17 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 	}
 
 	fnic_priv(sc)->flags = FNIC_DEVICE_RESET;
-	/* Allocate tag if not present */
 
 	if (unlikely(tag < 0)) {
 		/*
-		 * Really should fix the midlayer to pass in a proper
-		 * request for ioctls...
+		 * For device reset issued through sg3utils, we let
+		 * only one LUN_RESET to go through and use a special
+		 * tag equal to max_tag_id so that we don't have to allocate
+		 * or free it. It won't interact with tags
+		 * allocated by mid layer.
		 */
-		tag = fnic_scsi_host_start_tag(fnic, sc);
-		if (unlikely(tag == SCSI_NO_TAG))
-			goto fnic_device_reset_end;
-		tag_gen_flag = 1;
+		mutex_lock(&fnic->sgreset_mutex);
+		tag = fnic->fnic_max_tag_id;
 		new_sc = 1;
 	}
 	io_lock = fnic_io_lock_hash(fnic, sc);

@@ -2434,9 +2400,8 @@ fnic_device_reset_end:
 		  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
 		  fnic_flags_and_state(sc));
 
-	/* free tag if it is allocated */
-	if (unlikely(tag_gen_flag))
-		fnic_scsi_host_end_tag(fnic, sc);
+	if (new_sc)
+		mutex_unlock(&fnic->sgreset_mutex);
 
 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 		      "Returning from device reset %s\n",
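The fnic hunks above drop the dummy blk-mq request trick entirely: a reset issued from outside the midlayer (for example through sg3_utils) now takes sgreset_mutex and reuses the fixed tag fnic_max_tag_id, so no tag has to be allocated or freed. A hedged user-space sketch of the reserved-slot idea (names and the tag value are illustrative, not the driver's):

#include <pthread.h>

#define MAX_TAG_ID 1024	/* hypothetical: one slot past the midlayer's tag range */

static pthread_mutex_t sgreset_lock = PTHREAD_MUTEX_INITIALIZER;

/* Only one externally issued reset runs at a time, and it always uses
 * the reserved tag, so it can never collide with tags the midlayer
 * hands out for normal commands. */
static int issue_device_reset(int tag)
{
	int reserved = 0;

	if (tag < 0) {			/* no midlayer-owned tag: ioctl path */
		pthread_mutex_lock(&sgreset_lock);
		tag = MAX_TAG_ID;	/* fixed slot, never allocated or freed */
		reserved = 1;
	}

	/* ... fire the reset using 'tag' ... */

	if (reserved)
		pthread_mutex_unlock(&sgreset_lock);
	return 0;
}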
@@ -386,37 +386,7 @@ struct sas_phy *sas_get_local_phy(struct domain_device *dev)
 }
 EXPORT_SYMBOL_GPL(sas_get_local_phy);
 
-static void sas_wait_eh(struct domain_device *dev)
-{
-	struct sas_ha_struct *ha = dev->port->ha;
-	DEFINE_WAIT(wait);
-
-	if (dev_is_sata(dev)) {
-		ata_port_wait_eh(dev->sata_dev.ap);
-		return;
-	}
-retry:
-	spin_lock_irq(&ha->lock);
-
-	while (test_bit(SAS_DEV_EH_PENDING, &dev->state)) {
-		prepare_to_wait(&ha->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
-		spin_unlock_irq(&ha->lock);
-		schedule();
-		spin_lock_irq(&ha->lock);
-	}
-	finish_wait(&ha->eh_wait_q, &wait);
-
-	spin_unlock_irq(&ha->lock);
-
-	/* make sure SCSI EH is complete */
-	if (scsi_host_in_recovery(ha->shost)) {
-		msleep(10);
-		goto retry;
-	}
-}
-
-static int sas_queue_reset(struct domain_device *dev, int reset_type,
-			   u64 lun, int wait)
+static int sas_queue_reset(struct domain_device *dev, int reset_type, u64 lun)
 {
 	struct sas_ha_struct *ha = dev->port->ha;
 	int scheduled = 0, tries = 100;

@@ -424,8 +394,6 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type,
 	/* ata: promote lun reset to bus reset */
 	if (dev_is_sata(dev)) {
 		sas_ata_schedule_reset(dev);
-		if (wait)
-			sas_ata_wait_eh(dev);
 		return SUCCESS;
 	}
 

@@ -443,9 +411,6 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type,
 		}
 		spin_unlock_irq(&ha->lock);
 
-		if (wait)
-			sas_wait_eh(dev);
-
 		if (scheduled)
 			return SUCCESS;
 	}

@@ -498,7 +463,7 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
 	struct sas_internal *i = to_sas_internal(host->transportt);
 
 	if (current != host->ehandler)
-		return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun, 0);
+		return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun);
 
 	int_to_scsilun(cmd->device->lun, &lun);
 

@@ -521,7 +486,7 @@ int sas_eh_target_reset_handler(struct scsi_cmnd *cmd)
 	struct sas_internal *i = to_sas_internal(host->transportt);
 
 	if (current != host->ehandler)
-		return sas_queue_reset(dev, SAS_DEV_RESET, 0, 0);
+		return sas_queue_reset(dev, SAS_DEV_RESET, 0);
 
 	if (!i->dft->lldd_I_T_nexus_reset)
 		return FAILED;
@@ -199,7 +199,7 @@
 *
 *****************************************************************************/
 
-typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS {
+typedef struct _MPI2_SYSTEM_INTERFACE_REGS {
 	U32	Doorbell;		/*0x00 */
 	U32	WriteSequence;		/*0x04 */
 	U32	HostDiagnostic;		/*0x08 */
@@ -138,6 +138,9 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
 static void
 _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
 
+static u32
+_base_readl_ext_retry(const void __iomem *addr);
+
 /**
  * mpt3sas_base_check_cmd_timeout - Function
  *		to check timeout and command termination due

@@ -201,7 +204,7 @@ module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
 * while reading the system interface register.
 */
 static inline u32
-_base_readl_aero(const volatile void __iomem *addr)
+_base_readl_aero(const void __iomem *addr)
 {
 	u32 i = 0, ret_val;
 

@@ -213,8 +216,22 @@ _base_readl_aero(const void __iomem *addr)
 	return ret_val;
 }
 
+static u32
+_base_readl_ext_retry(const void __iomem *addr)
+{
+	u32 i, ret_val;
+
+	for (i = 0 ; i < 30 ; i++) {
+		ret_val = readl(addr);
+		if (ret_val == 0)
+			continue;
+	}
+
+	return ret_val;
+}
+
 static inline u32
-_base_readl(const volatile void __iomem *addr)
+_base_readl(const void __iomem *addr)
 {
 	return readl(addr);
 }

@@ -940,7 +957,7 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
 
 	dump_stack();
 
-	doorbell = ioc->base_readl(&ioc->chip->Doorbell);
+	doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
 	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
 		mpt3sas_print_fault_code(ioc, doorbell &
 		    MPI2_DOORBELL_DATA_MASK);

@@ -6686,7 +6703,7 @@ mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
 {
 	u32 s, sc;
 
-	s = ioc->base_readl(&ioc->chip->Doorbell);
+	s = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
 	sc = s & MPI2_IOC_STATE_MASK;
 	return cooked ? sc : s;
 }

@@ -6831,7 +6848,7 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
 				__func__, count, timeout));
 			return 0;
 		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
-			doorbell = ioc->base_readl(&ioc->chip->Doorbell);
+			doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
 			if ((doorbell & MPI2_IOC_STATE_MASK) ==
 			    MPI2_IOC_STATE_FAULT) {
 				mpt3sas_print_fault_code(ioc, doorbell);

@@ -6871,7 +6888,7 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
 	count = 0;
 	cntdn = 1000 * timeout;
 	do {
-		doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
+		doorbell_reg = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
 		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
 			dhsprintk(ioc,
 				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",

@@ -7019,7 +7036,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 	__le32 *mfp;
 
 	/* make sure doorbell is not in use */
-	if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
+	if ((ioc->base_readl_ext_retry(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
 		ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
 		return -EFAULT;
 	}

@@ -7068,7 +7085,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 	}
 
 	/* read the first two 16-bits, it gives the total length of the reply */
-	reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
+	reply[0] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
 	    & MPI2_DOORBELL_DATA_MASK);
 	writel(0, &ioc->chip->HostInterruptStatus);
 	if ((_base_wait_for_doorbell_int(ioc, 5))) {

@@ -7076,7 +7093,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 		    __LINE__);
 		return -EFAULT;
 	}
-	reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
+	reply[1] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
 	    & MPI2_DOORBELL_DATA_MASK);
 	writel(0, &ioc->chip->HostInterruptStatus);
 

@@ -7087,10 +7104,10 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 			return -EFAULT;
 		}
 		if (i >= reply_bytes/2) /* overflow case */
-			ioc->base_readl(&ioc->chip->Doorbell);
+			ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
 		else
 			reply[i] = le16_to_cpu(
-			    ioc->base_readl(&ioc->chip->Doorbell)
+			    ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
 			    & MPI2_DOORBELL_DATA_MASK);
 		writel(0, &ioc->chip->HostInterruptStatus);
 	}

@@ -7949,7 +7966,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
 			goto out;
 		}
 
-		host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
+		host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
 		drsprintk(ioc,
 			  ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
 				   count, host_diagnostic));

@@ -7969,7 +7986,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
 	for (count = 0; count < (300000000 /
 	    MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
 
-		host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
+		host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
 
 		if (host_diagnostic == 0xFFFFFFFF) {
 			ioc_info(ioc,

@@ -8359,10 +8376,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 	ioc->rdpq_array_enable_assigned = 0;
 	ioc->use_32bit_dma = false;
 	ioc->dma_mask = 64;
-	if (ioc->is_aero_ioc)
+	if (ioc->is_aero_ioc) {
 		ioc->base_readl = &_base_readl_aero;
-	else
+		ioc->base_readl_ext_retry = &_base_readl_ext_retry;
+	} else {
 		ioc->base_readl = &_base_readl;
+		ioc->base_readl_ext_retry = &_base_readl;
+	}
 	r = mpt3sas_base_map_resources(ioc);
 	if (r)
 		goto out_free_resources;
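In the mpt3sas hunks above, doorbell-class reads are routed through a second function pointer: Aero/Sea controllers can transiently return 0 from a register read, so _base_readl_ext_retry() re-reads up to 30 times, while older controllers keep plain readl() semantics through the same pointer. A user-space sketch of the dispatch (hypothetical names; note the in-tree loop uses `continue` and always completes its 30 reads, whereas this sketch stops at the first non-zero value):

#include <stdint.h>

typedef uint32_t (*read_reg_fn)(const void *addr);

static uint32_t read_plain(const void *addr)
{
	return *(const volatile uint32_t *)addr;
}

/* Retry while the register reads back 0, bounded like the driver's loop. */
static uint32_t read_retry(const void *addr)
{
	uint32_t v = 0;

	for (int i = 0; i < 30; i++) {
		v = read_plain(addr);
		if (v != 0)
			break;
	}
	return v;
}

struct adapter {
	read_reg_fn base_readl;			/* ordinary registers */
	read_reg_fn base_readl_ext_retry;	/* doorbell-class registers */
};

static void adapter_init(struct adapter *ioc, int is_aero)
{
	/* both pointers fall back to the plain read on older parts */
	ioc->base_readl = read_plain;
	ioc->base_readl_ext_retry = is_aero ? read_retry : read_plain;
}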
@@ -994,7 +994,7 @@ typedef void (*NVME_BUILD_PRP)(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 typedef void (*PUT_SMID_IO_FP_HIP) (struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u16 funcdep);
 typedef void (*PUT_SMID_DEFAULT) (struct MPT3SAS_ADAPTER *ioc, u16 smid);
-typedef u32 (*BASE_READ_REG) (const volatile void __iomem *addr);
+typedef u32 (*BASE_READ_REG) (const void __iomem *addr);
 /*
  * To get high iops reply queue's msix index when high iops mode is enabled
  * else get the msix index of general reply queues.

@@ -1618,6 +1618,7 @@ struct MPT3SAS_ADAPTER {
 	u8		diag_trigger_active;
 	u8		atomic_desc_capable;
 	BASE_READ_REG	base_readl;
+	BASE_READ_REG	base_readl_ext_retry;
 	struct SL_WH_MASTER_TRIGGER_T diag_trigger_master;
 	struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event;
 	struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi;
@@ -1500,7 +1500,7 @@ static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
 
 	sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
 	if (sdev) {
-		scsi_rescan_device(&sdev->sdev_gendev);
+		scsi_rescan_device(sdev);
 		scsi_device_put(sdev);
 	}
 }
@@ -548,7 +548,6 @@ extern void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data);
 extern void qedf_wq_grcdump(struct work_struct *work);
 void qedf_stag_change_work(struct work_struct *work);
 void qedf_ctx_soft_reset(struct fc_lport *lport);
-extern void qedf_board_disable_work(struct work_struct *work);
 extern void qedf_schedule_hw_err_handler(void *dev,
 		enum qed_hw_err_type err_type);
 
@@ -67,8 +67,6 @@ void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
 int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id);
 u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl);
 void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id);
-int qedi_create_sysfs_ctx_attr(struct qedi_ctx *qedi);
-void qedi_remove_sysfs_ctx_attr(struct qedi_ctx *qedi);
 void qedi_clearsq(struct qedi_ctx *qedi,
		  struct qedi_conn *qedi_conn,
		  struct iscsi_task *task);
@@ -3093,8 +3093,6 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 		vha->flags.difdix_supported = 1;
 		ql_dbg(ql_dbg_user, vha, 0x7082,
 		    "Registered for DIF/DIX type 1 and 3 protection.\n");
-		if (ql2xenabledif == 1)
-			prot = SHOST_DIX_TYPE0_PROTECTION;
 		scsi_host_set_prot(vha->host,
 		    prot | SHOST_DIF_TYPE1_PROTECTION
 		    | SHOST_DIF_TYPE2_PROTECTION
@@ -12,13 +12,12 @@
 * ----------------------------------------------------------------------
 * | Module Init and Probe        |       0x0199       |                |
 * | Mailbox commands             |       0x1206       | 0x11a5-0x11ff  |
-* | Device Discovery             |       0x2134       | 0x210e-0x2115  |
-* |                              |                    | 0x211c-0x2128  |
-* |                              |                    | 0x212c-0x2134  |
+* | Device Discovery             |       0x2134       | 0x2112-0x2115  |
+* |                              |                    | 0x2127-0x2128  |
 * | Queue Command and IO tracing |       0x3074       | 0x300b         |
 * |                              |                    | 0x3027-0x3028  |
 * |                              |                    | 0x303d-0x3041  |
-* |                              |                    | 0x302d,0x3033  |
+* |                              |                    | 0x302e,0x3033  |
 * |                              |                    | 0x3036,0x3038  |
 * |                              |                    | 0x303a         |
 * | DPC Thread                   |       0x4023       | 0x4002,0x4013  |
@@ -368,6 +368,7 @@ ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
 #define ql_dbg_tgt_tmr	0x00001000 /* Target mode task management */
 #define ql_dbg_tgt_dif  0x00000800 /* Target mode dif */
 #define ql_dbg_edif	0x00000400 /* edif and purex debug */
+#define ql_dbg_unsol	0x00000100 /* Unsolicited path debug */
 
 extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
	uint32_t, void **);
@@ -346,6 +346,12 @@ struct name_list_extended {
 	u8		sent;
 };
 
+struct qla_nvme_fc_rjt {
+	struct fcnvme_ls_rjt *c;
+	dma_addr_t  cdma;
+	u16 size;
+};
+
 struct els_reject {
 	struct fc_els_ls_rjt *c;
 	dma_addr_t  cdma;

@@ -503,6 +509,20 @@ struct ct_arg {
 	port_id_t id;
 };
 
+struct qla_nvme_lsrjt_pt_arg {
+	struct fc_port *fcport;
+	u8 opcode;
+	u8 vp_idx;
+	u8 reason;
+	u8 explanation;
+	__le16 nport_handle;
+	u16 control_flags;
+	__le16 ox_id;
+	__le32 xchg_address;
+	u32 tx_byte_count, rx_byte_count;
+	dma_addr_t tx_addr, rx_addr;
+};
+
 /*
  * SRB extensions.
  */

@@ -611,13 +631,16 @@ struct srb_iocb {
 			void *desc;
 
 			/* These are only used with ls4 requests */
-			int cmd_len;
-			int rsp_len;
+			__le32 cmd_len;
+			__le32 rsp_len;
 			dma_addr_t cmd_dma;
 			dma_addr_t rsp_dma;
 			enum nvmefc_fcp_datadir dir;
 			uint32_t dl;
 			uint32_t timeout_sec;
+			__le32 exchange_address;
+			__le16 nport_handle;
+			__le16 ox_id;
 			struct	list_head   entry;
 		} nvme;
 		struct {

@@ -707,6 +730,10 @@ typedef struct srb {
 	struct fc_port *fcport;
 	struct scsi_qla_host *vha;
 	unsigned int start_timer:1;
+	unsigned int abort:1;
+	unsigned int aborted:1;
+	unsigned int completed:1;
+	unsigned int unsol_rsp:1;
 
 	uint32_t handle;
 	uint16_t flags;

@@ -2542,6 +2569,7 @@ enum rscn_addr_format {
 typedef struct fc_port {
 	struct list_head list;
 	struct scsi_qla_host *vha;
+	struct list_head unsol_ctx_head;
 
 	unsigned int conf_compl_supported:1;
 	unsigned int deleted:2;

@@ -3742,6 +3770,16 @@ struct qla_fw_resources {
 	u16 pad;
 };
 
+struct qla_fw_res {
+	u16      iocb_total;
+	u16      iocb_limit;
+	atomic_t iocb_used;
+
+	u16      exch_total;
+	u16      exch_limit;
+	atomic_t exch_used;
+};
+
 #define QLA_IOCB_PCT_LIMIT 95
 
 struct qla_buf_pool {

@@ -3787,6 +3825,12 @@ struct qla_qpair {
 
 	uint16_t id;			/* qp number used with FW */
 	uint16_t vp_idx;		/* vport ID */
+
+	uint16_t dsd_inuse;
+	uint16_t dsd_avail;
+	struct list_head dsd_list;
+#define NUM_DSD_CHAIN 4096
+
 	mempool_t *srb_mempool;
 
 	struct pci_dev  *pdev;

@@ -4384,7 +4428,6 @@ struct qla_hw_data {
 	uint8_t aen_mbx_count;
 	atomic_t num_pend_mbx_stage1;
 	atomic_t num_pend_mbx_stage2;
-	atomic_t num_pend_mbx_stage3;
 	uint16_t frame_payload_size;
 
 	uint32_t login_retry_count;

@@ -4714,11 +4757,6 @@ struct qla_hw_data {
 	struct fw_blob *hablob;
 	struct qla82xx_legacy_intr_set nx_legacy_intr;
 
-	uint16_t	gbl_dsd_inuse;
-	uint16_t	gbl_dsd_avail;
-	struct list_head gbl_dsd_list;
-#define NUM_DSD_CHAIN 4096
-
 	uint8_t fw_type;
 	uint32_t file_prd_off;	/* File firmware product offset */
 

@@ -4800,6 +4838,8 @@ struct qla_hw_data {
 	struct els_reject elsrej;
 	u8 edif_post_stop_cnt_down;
 	struct qla_vp_map *vp_map;
+	struct qla_nvme_fc_rjt lsrjt;
+	struct qla_fw_res fwres ____cacheline_aligned;
 };
 
 #define RX_ELS_SIZE (roundup(sizeof(struct enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES))

@@ -4832,6 +4872,7 @@ struct active_regions {
 * is variable) starting at "iocb".
 */
 struct purex_item {
+	void *purls_context;
 	struct list_head list;
 	struct scsi_qla_host *vha;
 	void (*process_item)(struct scsi_qla_host *vha,
@@ -276,6 +276,16 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
 
		seq_printf(s, "estimate exchange used[%d] high water limit [%d] n",
		    exch_used, ha->base_qpair->fwres.exch_limit);
+
+		if (ql2xenforce_iocb_limit == 2) {
+			iocbs_used = atomic_read(&ha->fwres.iocb_used);
+			exch_used = atomic_read(&ha->fwres.exch_used);
+			seq_printf(s, " estimate iocb2 used [%d] high water limit [%d]\n",
+					iocbs_used, ha->fwres.iocb_limit);
+
+			seq_printf(s, " estimate exchange2 used[%d] high water limit [%d] \n",
+					exch_used, ha->fwres.exch_limit);
+		}
 	}
 
 	return 0;
@@ -603,7 +603,11 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev_id);
 fc_port_t *qla2x00_find_fcport_by_loopid(scsi_qla_host_t *, uint16_t);
 fc_port_t *qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *, u8 *, u8);
 fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8);
-void __qla_consume_iocb(struct scsi_qla_host *vha, void **pkt, struct rsp_que **rsp);
+void qla24xx_queue_purex_item(scsi_qla_host_t *, struct purex_item *,
+			      void (*process_item)(struct scsi_qla_host *,
+						   struct purex_item *));
+void __qla_consume_iocb(struct scsi_qla_host *, void **, struct rsp_que **);
+void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp);
 
 /*
  * Global Function Prototypes in qla_sup.c source file.

@@ -666,9 +670,11 @@ extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
 extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
 extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *);
 extern int qla2x00_mailbox_passthru(struct bsg_job *bsg_job);
-int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha, void **pkt,
-	struct rsp_que **rsp, u8 *buf, u32 buf_len);
-
+int qla2x00_sys_ld_info(struct bsg_job *bsg_job);
+int __qla_copy_purex_to_buffer(struct scsi_qla_host *, void **,
+	struct rsp_que **, u8 *, u32);
+struct purex_item *qla27xx_copy_multiple_pkt(struct scsi_qla_host *vha,
+	void **pkt, struct rsp_que **rsp, bool is_purls, bool byte_order);
 int qla_mailbox_passthru(scsi_qla_host_t *vha, uint16_t *mbx_in,
	uint16_t *mbx_out);
@@ -2223,6 +2223,8 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
			rval = QLA_FUNCTION_FAILED;
		}
 	}
+	if (tm_iocb->u.tmf.data)
+		rval = tm_iocb->u.tmf.data;
 
 done_free_sp:
 	/* ref: INIT */

@@ -4203,7 +4205,7 @@ void qla_init_iocb_limit(scsi_qla_host_t *vha)
 	u8 i;
 	struct qla_hw_data *ha = vha->hw;
 
-	 __qla_adjust_iocb_limit(ha->base_qpair);
+	__qla_adjust_iocb_limit(ha->base_qpair);
 	ha->base_qpair->fwres.iocbs_used = 0;
 	ha->base_qpair->fwres.exch_used = 0;
 

@@ -4214,6 +4216,14 @@ void qla_init_iocb_limit(scsi_qla_host_t *vha)
			ha->queue_pair_map[i]->fwres.exch_used = 0;
		}
 	}
+
+	ha->fwres.iocb_total = ha->orig_fw_iocb_count;
+	ha->fwres.iocb_limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
+	ha->fwres.exch_total = ha->orig_fw_xcb_count;
+	ha->fwres.exch_limit = (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;
+
+	atomic_set(&ha->fwres.iocb_used, 0);
+	atomic_set(&ha->fwres.exch_used, 0);
 }
 
 void qla_adjust_iocb_limit(scsi_qla_host_t *vha)

@@ -5554,6 +5564,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
 	INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
 	INIT_LIST_HEAD(&fcport->gnl_entry);
 	INIT_LIST_HEAD(&fcport->list);
+	INIT_LIST_HEAD(&fcport->unsol_ctx_head);
 
 	INIT_LIST_HEAD(&fcport->sess_cmd_list);
 	spin_lock_init(&fcport->sess_cmd_lock);

@@ -5596,7 +5607,7 @@ static void qla_get_login_template(scsi_qla_host_t *vha)
 	__be32 *q;
 
 	memset(ha->init_cb, 0, ha->init_cb_size);
-	sz = min_t(int, sizeof(struct fc_els_csp), ha->init_cb_size);
+	sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size);
 	rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
	    ha->init_cb, sz);
 	if (rval != QLA_SUCCESS) {

@@ -7390,14 +7401,15 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
 	}
 
 	/* purge MBox commands */
-	if (atomic_read(&ha->num_pend_mbx_stage3)) {
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) {
		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
 	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	i = 0;
-	while (atomic_read(&ha->num_pend_mbx_stage3) ||
-	       atomic_read(&ha->num_pend_mbx_stage2) ||
+	while (atomic_read(&ha->num_pend_mbx_stage2) ||
	       atomic_read(&ha->num_pend_mbx_stage1)) {
		msleep(20);
		i++;

@@ -9643,6 +9655,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
 	qpair->vp_idx = vp_idx;
 	qpair->fw_started = ha->flags.fw_started;
 	INIT_LIST_HEAD(&qpair->hints_list);
+	INIT_LIST_HEAD(&qpair->dsd_list);
 	qpair->chip_reset = ha->base_qpair->chip_reset;
 	qpair->enable_class_2 = ha->base_qpair->enable_class_2;
 	qpair->enable_explicit_conf =

@@ -9771,6 +9784,19 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
 	if (ret != QLA_SUCCESS)
		goto fail;
 
+	if (!list_empty(&qpair->dsd_list)) {
+		struct dsd_dma *dsd_ptr, *tdsd_ptr;
+
+		/* clean up allocated prev pool */
+		list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
+					 &qpair->dsd_list, list) {
+			dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
+				      dsd_ptr->dsd_list_dma);
+			list_del(&dsd_ptr->list);
+			kfree(dsd_ptr);
+		}
+	}
+
 	mutex_lock(&ha->mq_lock);
 	ha->queue_pair_map[qpair->id] = NULL;
 	clear_bit(qpair->id, ha->qpair_qid_map);
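qla_init_iocb_limit() above now also seeds the adapter-wide counters: the high-water marks are QLA_IOCB_PCT_LIMIT (95) percent of the firmware-reported totals. A small sketch of the integer math — a firmware advertising 2048 IOCBs yields a limit of 1945:

#define QLA_IOCB_PCT_LIMIT 95

/* 95% high-water mark, same integer arithmetic as the hunk above */
static unsigned int fw_limit(unsigned int total)
{
	return (total * QLA_IOCB_PCT_LIMIT) / 100;	/* fw_limit(2048) == 1945 */
}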
@@ -386,6 +386,7 @@ enum {
 	RESOURCE_IOCB = BIT_0,
 	RESOURCE_EXCH = BIT_1,  /* exchange */
 	RESOURCE_FORCE = BIT_2,
+	RESOURCE_HA = BIT_3,
 };
 
 static inline int

@@ -393,7 +394,7 @@ qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
 {
 	u16 iocbs_used, i;
 	u16 exch_used;
-	struct qla_hw_data *ha = qp->vha->hw;
+	struct qla_hw_data *ha = qp->hw;
 
 	if (!ql2xenforce_iocb_limit) {
		iores->res_type = RESOURCE_NONE;

@@ -428,15 +429,69 @@ qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
			return -ENOSPC;
		}
 	}
+
+	if (ql2xenforce_iocb_limit == 2) {
+		if ((iores->iocb_cnt + atomic_read(&ha->fwres.iocb_used)) >=
+		    ha->fwres.iocb_limit) {
+			iores->res_type = RESOURCE_NONE;
+			return -ENOSPC;
+		}
+
+		if (iores->res_type & RESOURCE_EXCH) {
+			if ((iores->exch_cnt + atomic_read(&ha->fwres.exch_used)) >=
+			    ha->fwres.exch_limit) {
+				iores->res_type = RESOURCE_NONE;
+				return -ENOSPC;
+			}
+		}
+	}
+
 force:
 	qp->fwres.iocbs_used += iores->iocb_cnt;
 	qp->fwres.exch_used += iores->exch_cnt;
+	if (ql2xenforce_iocb_limit == 2) {
+		atomic_add(iores->iocb_cnt, &ha->fwres.iocb_used);
+		atomic_add(iores->exch_cnt, &ha->fwres.exch_used);
+		iores->res_type |= RESOURCE_HA;
+	}
 	return 0;
 }
 
+/*
+ * decrement to zero. This routine will not decrement below zero
+ * @v:  pointer of type atomic_t
+ * @amount: amount to decrement from v
+ */
+static void qla_atomic_dtz(atomic_t *v, int amount)
+{
+	int c, old, dec;
+
+	c = atomic_read(v);
+	for (;;) {
+		dec = c - amount;
+		if (unlikely(dec < 0))
+			dec = 0;
+
+		old = atomic_cmpxchg((v), c, dec);
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+}
+
 static inline void
 qla_put_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
 {
+	struct qla_hw_data *ha = qp->hw;
+
+	if (iores->res_type & RESOURCE_HA) {
+		if (iores->res_type & RESOURCE_IOCB)
+			qla_atomic_dtz(&ha->fwres.iocb_used, iores->iocb_cnt);
+
+		if (iores->res_type & RESOURCE_EXCH)
+			qla_atomic_dtz(&ha->fwres.exch_used, iores->exch_cnt);
+	}
+
 	if (iores->res_type & RESOURCE_IOCB) {
		if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
			qp->fwres.iocbs_used -= iores->iocb_cnt;
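qla_atomic_dtz() above is a lock-free "decrement toward zero": the cmpxchg loop clamps the counter at 0 instead of letting racing releases drive it negative. The same shape in portable C11, as a sketch:

#include <stdatomic.h>

/* Subtract 'amount' from *v but never let it drop below zero.
 * The compare-exchange loop retries if another thread raced us. */
static void atomic_dec_to_zero(atomic_int *v, int amount)
{
	int cur = atomic_load(v);

	for (;;) {
		int next = cur - amount;

		if (next < 0)
			next = 0;
		/* on failure, 'cur' is reloaded with the current value */
		if (atomic_compare_exchange_weak(v, &cur, next))
			break;
	}
}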
@@ -11,6 +11,7 @@
 
 #include <scsi/scsi_tcq.h>
 
+static int qla_start_scsi_type6(srb_t *sp);
 /**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command

@@ -590,8 +591,6 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
 {
 	struct dsd64 *cur_dsd = NULL, *next_dsd;
-	scsi_qla_host_t	*vha;
-	struct qla_hw_data *ha;
 	struct scsi_cmnd *cmd;
 	struct scatterlist *cur_seg;
 	uint8_t avail_dsds;

@@ -613,9 +612,6 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
		return 0;
 	}
 
-	vha = sp->vha;
-	ha = vha->hw;
-
 	/* Set transfer direction */
 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);

@@ -636,14 +632,13 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
 
-		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
-		    struct dsd_dma, list);
+		dsd_ptr = list_first_entry(&qpair->dsd_list, struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
-		ha->gbl_dsd_avail--;
+		qpair->dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
-		ha->gbl_dsd_inuse++;
+		qpair->dsd_inuse++;
 
		if (first_iocb) {
			first_iocb = 0;

@@ -1722,6 +1717,8 @@ qla24xx_dif_start_scsi(srb_t *sp)
 	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
+		else
+			return qla_start_scsi_type6(sp);
 	}
 
 	/* Setup device pointers. */

@@ -2101,6 +2098,8 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
 	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla2xxx_start_scsi_mq(sp);
+		else
+			return qla_start_scsi_type6(sp);
 	}
 
 	spin_lock_irqsave(&qpair->qp_lock, flags);

@@ -3368,6 +3367,7 @@ qla82xx_start_scsi(srb_t *sp)
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = NULL;
 	struct rsp_que *rsp = NULL;
+	struct qla_qpair *qpair = sp->qpair;
 
 	/* Setup device pointers. */
 	reg = &ha->iobase->isp82;

@@ -3416,18 +3416,18 @@ qla82xx_start_scsi(srb_t *sp)
		uint16_t i;
 
		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
-		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
+		if ((more_dsd_lists + qpair->dsd_inuse) >= NUM_DSD_CHAIN) {
			ql_dbg(ql_dbg_io, vha, 0x300d,
			    "Num of DSD list %d is than %d for cmd=%p.\n",
-			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
+			    more_dsd_lists + qpair->dsd_inuse, NUM_DSD_CHAIN,
			    cmd);
			goto queuing_error;
		}
 
-		if (more_dsd_lists <= ha->gbl_dsd_avail)
+		if (more_dsd_lists <= qpair->dsd_avail)
			goto sufficient_dsds;
		else
-			more_dsd_lists -= ha->gbl_dsd_avail;
+			more_dsd_lists -= qpair->dsd_avail;
 
		for (i = 0; i < more_dsd_lists; i++) {
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);

@@ -3447,8 +3447,8 @@ qla82xx_start_scsi(srb_t *sp)
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}
-			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
-			ha->gbl_dsd_avail++;
+			list_add_tail(&dsd_ptr->list, &qpair->dsd_list);
+			qpair->dsd_avail++;
		}
 
sufficient_dsds:

@@ -3767,21 +3767,28 @@ qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
 	nvme = &sp->u.iocb_cmd;
 	cmd_pkt->entry_type = PT_LS4_REQUEST;
 	cmd_pkt->entry_count = 1;
-	cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
-
 	cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
-	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
 	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
 
-	cmd_pkt->tx_dseg_count = cpu_to_le16(1);
-	cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len);
-	cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len);
-	put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
+	if (sp->unsol_rsp) {
+		cmd_pkt->control_flags =
+			cpu_to_le16(CF_LS4_RESPONDER << CF_LS4_SHIFT);
+		cmd_pkt->nport_handle = nvme->u.nvme.nport_handle;
+		cmd_pkt->exchange_address = nvme->u.nvme.exchange_address;
+	} else {
+		cmd_pkt->control_flags =
+			cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
+		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+		cmd_pkt->rx_dseg_count = cpu_to_le16(1);
+		cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
+		cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len;
+		put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
+	}
 
-	cmd_pkt->rx_dseg_count = cpu_to_le16(1);
-	cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len);
-	cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len);
-	put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
+	cmd_pkt->tx_dseg_count = cpu_to_le16(1);
+	cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
+	cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len;
+	put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
 }
 
 static void

@@ -4198,3 +4205,267 @@ queuing_error:
 
 	return rval;
 }
+
+/**
+ * qla_start_scsi_type6() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+static int
+qla_start_scsi_type6(srb_t *sp)
+{
+	int		nseg;
+	unsigned long	flags;
+	uint32_t	*clr_ptr;
+	uint32_t	handle;
+	struct cmd_type_6 *cmd_pkt;
+	uint16_t	cnt;
+	uint16_t	req_cnt;
+	uint16_t	tot_dsds;
+	struct req_que *req = NULL;
+	struct rsp_que *rsp;
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+	struct scsi_qla_host *vha = sp->fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_qpair *qpair = sp->qpair;
+	uint16_t more_dsd_lists = 0;
+	struct dsd_dma *dsd_ptr;
+	uint16_t i;
+	__be32 *fcp_dl;
+	uint8_t additional_cdb_len;
+	struct ct6_dsd *ctx;
+
+	/* Acquire qpair specific lock */
+	spin_lock_irqsave(&qpair->qp_lock, flags);
+
+	/* Setup qpair pointers */
+	req = qpair->req;
+	rsp = qpair->rsp;
+
+	/* So we know we haven't pci_map'ed anything yet */
+	tot_dsds = 0;
+
+	/* Send marker if required */
+	if (vha->marker_needed != 0) {
+		if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+			spin_unlock_irqrestore(&qpair->qp_lock, flags);
+			return QLA_FUNCTION_FAILED;
+		}
+		vha->marker_needed = 0;
+	}
+
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0)
+		goto queuing_error;
+
+	/* Map the sg table so we have an accurate count of sg entries needed */
+	if (scsi_sg_count(cmd)) {
+		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+				  scsi_sg_count(cmd), cmd->sc_data_direction);
+		if (unlikely(!nseg))
+			goto queuing_error;
+	} else {
+		nseg = 0;
+	}
+
+	tot_dsds = nseg;
+
+	/* eventhough driver only need 1 T6 IOCB, FW still convert DSD to Continueation IOCB */
+	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
+	sp->iores.exch_cnt = 1;
+	sp->iores.iocb_cnt = req_cnt;
+
+	if (qla_get_fw_resources(sp->qpair, &sp->iores))
+		goto queuing_error;
+
+	more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
+	if ((more_dsd_lists + qpair->dsd_inuse) >= NUM_DSD_CHAIN) {
+		ql_dbg(ql_dbg_io, vha, 0x3028,
+		       "Num of DSD list %d is than %d for cmd=%p.\n",
+		       more_dsd_lists + qpair->dsd_inuse, NUM_DSD_CHAIN, cmd);
+		goto queuing_error;
+	}
+
+	if (more_dsd_lists <= qpair->dsd_avail)
+		goto sufficient_dsds;
+	else
+		more_dsd_lists -= qpair->dsd_avail;
+
+	for (i = 0; i < more_dsd_lists; i++) {
+		dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
+		if (!dsd_ptr) {
+			ql_log(ql_log_fatal, vha, 0x3029,
+			       "Failed to allocate memory for dsd_dma for cmd=%p.\n", cmd);
+			goto queuing_error;
+		}
+		INIT_LIST_HEAD(&dsd_ptr->list);
+
+		dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
+						   GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
+		if (!dsd_ptr->dsd_addr) {
+			kfree(dsd_ptr);
+			ql_log(ql_log_fatal, vha, 0x302a,
+			       "Failed to allocate memory for dsd_addr for cmd=%p.\n", cmd);
+			goto queuing_error;
+		}
+		list_add_tail(&dsd_ptr->list, &qpair->dsd_list);
+		qpair->dsd_avail++;
+	}
+
+sufficient_dsds:
+	req_cnt = 1;
+
+	if (req->cnt < (req_cnt + 2)) {
+		if (IS_SHADOW_REG_CAPABLE(ha)) {
+			cnt = *req->out_ptr;
+		} else {
+			cnt = (uint16_t)rd_reg_dword_relaxed(req->req_q_out);
+			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+				goto queuing_error;
+		}
+
+		if (req->ring_index < cnt)
+			req->cnt = cnt - req->ring_index;
+		else
+			req->cnt = req->length - (req->ring_index - cnt);
+		if (req->cnt < (req_cnt + 2))
+			goto queuing_error;
+	}
+
+	ctx = &sp->u.scmd.ct6_ctx;
+
+	memset(ctx, 0, sizeof(struct ct6_dsd));
+	ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
+					GFP_ATOMIC, &ctx->fcp_cmnd_dma);
+	if (!ctx->fcp_cmnd) {
+		ql_log(ql_log_fatal, vha, 0x3031,
+		       "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
+		goto queuing_error;
+	}
+
+	/* Initialize the DSD list and dma handle */
+	INIT_LIST_HEAD(&ctx->dsd_list);
+	ctx->dsd_use_cnt = 0;
+
+	if (cmd->cmd_len > 16) {
+		additional_cdb_len = cmd->cmd_len - 16;
+		if (cmd->cmd_len % 4 ||
+		    cmd->cmd_len > QLA_CDB_BUF_SIZE) {
+			/*
+			 * SCSI command bigger than 16 bytes must be
+			 * multiple of 4 or too big.
+			 */
+			ql_log(ql_log_warn, vha, 0x3033,
+			       "scsi cmd len %d not multiple of 4 for cmd=%p.\n",
+			       cmd->cmd_len, cmd);
+			goto queuing_error_fcp_cmnd;
+		}
+		ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+	} else {
+		additional_cdb_len = 0;
+		ctx->fcp_cmnd_len = 12 + 16 + 4;
+	}
+
+	/* Build command packet. */
+	req->current_outstanding_cmd = handle;
+	req->outstanding_cmds[handle] = sp;
+	sp->handle = handle;
+	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+	req->cnt -= req_cnt;
+
+	cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
+	cmd_pkt->handle = make_handle(req->id, handle);
+
+	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
+	clr_ptr = (uint32_t *)cmd_pkt + 2;
+	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+	/* Set NPORT-ID and LUN number */
+	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+	cmd_pkt->vp_index = sp->vha->vp_idx;
+
+	/* Build IOCB segments */
+	qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds);
+
+	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+	/* build FCP_CMND IU */
+	int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
+	ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+	if (cmd->sc_data_direction == DMA_TO_DEVICE)
+		ctx->fcp_cmnd->additional_cdb_len |= 1;
+	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+		ctx->fcp_cmnd->additional_cdb_len |= 2;
+
+	/* Populate the FCP_PRIO. */
+	if (ha->flags.fcp_prio_enabled)
+		ctx->fcp_cmnd->task_attribute |=
+			sp->fcport->fcp_prio << 3;
+
+	memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+
+	fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
+			    additional_cdb_len);
+	*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
+
+	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
+	put_unaligned_le64(ctx->fcp_cmnd_dma,
+			   &cmd_pkt->fcp_cmnd_dseg_address);
+
+	sp->flags |= SRB_FCP_CMND_DMA_VALID;
+	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+	/* Set total data segment count. */
+	cmd_pkt->entry_count = (uint8_t)req_cnt;
+
+	wmb();
+	/* Adjust ring index. */
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
+	} else {
+		req->ring_ptr++;
+	}
+
+	sp->qpair->cmd_cnt++;
+	sp->flags |= SRB_DMA_VALID;
+
+	/* Set chip new ring index. */
+	wrt_reg_dword(req->req_q_in, req->ring_index);
+
+	/* Manage unprocessed RIO/ZIO commands in response queue. */
+	if (vha->flags.process_response_queue &&
+	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+		qla24xx_process_response_queue(vha, rsp);
+
+	spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+	return QLA_SUCCESS;
+
+queuing_error_fcp_cmnd:
+	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
+
+queuing_error:
+	if (tot_dsds)
+		scsi_dma_unmap(cmd);
+
+	qla_put_fw_resources(sp->qpair, &sp->iores);
+
+	if (sp->u.scmd.crc_ctx) {
+		mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
+		sp->u.scmd.crc_ctx = NULL;
+	}
+
+	spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+	return QLA_FUNCTION_FAILED;
+}
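The FCP_CMND sizing in qla_start_scsi_type6() above is 12 fixed header bytes, the CDB (padded to at least 16 bytes, and required to be a multiple of 4 beyond that), and the 4-byte FCP_DL field. A sketch of the same check and math — a 32-byte CDB gives additional_cdb_len = 16 and fcp_cmnd_len = 48 (QLA_CDB_BUF_SIZE is a stand-in bound here, not the driver's value):

#include <stdio.h>

#define QLA_CDB_BUF_SIZE 256	/* hypothetical bound for this sketch */

/* Returns the FCP_CMND IU length for a given CDB length, or -1 when the
 * CDB is over 16 bytes and not a multiple of 4 (the driver rejects those). */
static int fcp_cmnd_len(int cmd_len)
{
	if (cmd_len <= 16)
		return 12 + 16 + 4;	/* short CDBs are padded to 16 */
	if (cmd_len % 4 || cmd_len > QLA_CDB_BUF_SIZE)
		return -1;
	return 12 + cmd_len + 4;	/* header + CDB + FCP_DL */
}

int main(void)
{
	printf("%d\n", fcp_cmnd_len(32));	/* prints 48 */
	return 0;
}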
@@ -56,6 +56,22 @@ const char *const port_state_str[] = {
 	[FCS_ONLINE]		= "ONLINE"
 };
 
+#define SFP_DISABLE_LASER_INITIATED	0x15	/* Sub code of 8070 AEN */
+#define SFP_ENABLE_LASER_INITIATED	0x16	/* Sub code of 8070 AEN */
+
+static inline void display_Laser_info(scsi_qla_host_t *vha,
+				      u16 mb1, u16 mb2, u16 mb3) {
+
+	if (mb1 == SFP_DISABLE_LASER_INITIATED)
+		ql_log(ql_log_warn, vha, 0xf0a2,
+		       "SFP temperature (%d C) reached/exceeded the threshold (%d C). Laser is disabled.\n",
+		       mb3, mb2);
+	if (mb1 == SFP_ENABLE_LASER_INITIATED)
+		ql_log(ql_log_warn, vha, 0xf0a3,
+		       "SFP temperature (%d C) reached normal operating level. Laser is enabled.\n",
+		       mb3);
+}
+
 static void
 qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt)
 {

@@ -823,6 +839,135 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
	}
 }
 
+/**
+ * qla27xx_copy_multiple_pkt() - Copy over purex/purls packets that can
+ * span over multiple IOCBs.
+ * @vha: SCSI driver HA context
+ * @pkt: ELS packet
+ * @rsp: Response queue
+ * @is_purls: True, for Unsolicited Received FC-NVMe LS rsp IOCB
+ *            false, for Unsolicited Received ELS IOCB
+ * @byte_order: True, to change the byte ordering of iocb payload
+ */
+struct purex_item *
+qla27xx_copy_multiple_pkt(struct scsi_qla_host *vha, void **pkt,
+			  struct rsp_que **rsp, bool is_purls,
+			  bool byte_order)
+{
+	struct purex_entry_24xx *purex = NULL;
+	struct pt_ls4_rx_unsol *purls = NULL;
+	struct rsp_que *rsp_q = *rsp;
+	sts_cont_entry_t *new_pkt;
+	uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
+	uint16_t buffer_copy_offset = 0, payload_size = 0;
+	uint16_t entry_count, entry_count_remaining;
+	struct purex_item *item;
+	void *iocb_pkt = NULL;
+
+	if (is_purls) {
+		purls = *pkt;
+		total_bytes = (le16_to_cpu(purls->frame_size) & 0x0FFF) -
+			      PURX_ELS_HEADER_SIZE;
+		entry_count = entry_count_remaining = purls->entry_count;
+		payload_size = sizeof(purls->payload);
+	} else {
+		purex = *pkt;
+		total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF) -
+			      PURX_ELS_HEADER_SIZE;
+		entry_count = entry_count_remaining = purex->entry_count;
+		payload_size = sizeof(purex->els_frame_payload);
+	}
+
+	pending_bytes = total_bytes;
+	no_bytes = (pending_bytes > payload_size) ? payload_size :
+		   pending_bytes;
+	ql_dbg(ql_dbg_async, vha, 0x509a,
+	       "%s LS, frame_size 0x%x, entry count %d\n",
+	       (is_purls ? "PURLS" : "FPIN"), total_bytes, entry_count);
+
+	item = qla24xx_alloc_purex_item(vha, total_bytes);
+	if (!item)
+		return item;
+
+	iocb_pkt = &item->iocb;
+
+	if (is_purls)
+		memcpy(iocb_pkt, &purls->payload[0], no_bytes);
+	else
+		memcpy(iocb_pkt, &purex->els_frame_payload[0], no_bytes);
+	buffer_copy_offset += no_bytes;
+	pending_bytes -= no_bytes;
+	--entry_count_remaining;
+
+	if (is_purls)
+		((response_t *)purls)->signature = RESPONSE_PROCESSED;
+	else
+		((response_t *)purex)->signature = RESPONSE_PROCESSED;
+	wmb();
+
+	do {
+		while ((total_bytes > 0) && (entry_count_remaining > 0)) {
+			if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) {
+				ql_dbg(ql_dbg_async, vha, 0x5084,
+				       "Ran out of IOCBs, partial data 0x%x\n",
+				       buffer_copy_offset);
+				cpu_relax();
+				continue;
+			}
+
+			new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
+			*pkt = new_pkt;
+
+			if (new_pkt->entry_type != STATUS_CONT_TYPE) {
+				ql_log(ql_log_warn, vha, 0x507a,
+				       "Unexpected IOCB type, partial data 0x%x\n",
+				       buffer_copy_offset);
+				break;
+			}
+
+			rsp_q->ring_index++;
+			if (rsp_q->ring_index == rsp_q->length) {
+				rsp_q->ring_index = 0;
+				rsp_q->ring_ptr = rsp_q->ring;
+			} else {
+				rsp_q->ring_ptr++;
+			}
+			no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
+				   sizeof(new_pkt->data) : pending_bytes;
+			if ((buffer_copy_offset + no_bytes) <= total_bytes) {
+				memcpy(((uint8_t *)iocb_pkt + buffer_copy_offset),
+				       new_pkt->data, no_bytes);
+				buffer_copy_offset += no_bytes;
+				pending_bytes -= no_bytes;
+				--entry_count_remaining;
+			} else {
+				ql_log(ql_log_warn, vha, 0x5044,
+				       "Attempt to copy more that we got, optimizing..%x\n",
+				       buffer_copy_offset);
+				memcpy(((uint8_t *)iocb_pkt + buffer_copy_offset),
+				       new_pkt->data,
+				       total_bytes - buffer_copy_offset);
+			}
+
+			((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
+			wmb();
+		}
+
+		if (pending_bytes != 0 || entry_count_remaining != 0) {
+			ql_log(ql_log_fatal, vha, 0x508b,
+			       "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n",
+			       total_bytes, entry_count_remaining);
+			qla24xx_free_purex_item(item);
+			return NULL;
+		}
+	} while (entry_count_remaining > 0);
+
+	if (byte_order)
+		host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes);
+
+	return item;
+}
+
 int
 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
 {

@@ -958,7 +1103,7 @@ initialize_purex_header:
 	return item;
 }
 
-static void
+void
 qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt,
			 void (*process_item)(struct scsi_qla_host *vha,
					      struct purex_item *pkt))

@@ -1798,6 +1943,8 @@ global_port_update:
		break;
 
 	case MBA_TEMPERATURE_ALERT:
+		if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+			display_Laser_info(vha, mb[1], mb[2], mb[3]);
		ql_dbg(ql_dbg_async, vha, 0x505e,
		    "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
		break;

@@ -3811,6 +3958,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 	struct qla_hw_data *ha = vha->hw;
 	struct purex_entry_24xx *purex_entry;
 	struct purex_item *pure_item;
+	struct pt_ls4_rx_unsol *p;
 	u16 rsp_in = 0, cur_ring_index;
 	int is_shadow_hba;
 

@@ -3983,7 +4131,19 @@ process_err:
			qla28xx_sa_update_iocb_entry(vha, rsp->req,
			    (struct sa_update_28xx *)pkt);
			break;
+		case PT_LS4_UNSOL:
+			p = (void *)pkt;
+			if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) {
+				rsp->ring_ptr = (response_t *)pkt;
+				rsp->ring_index = cur_ring_index;
+
+				ql_dbg(ql_dbg_init, vha, 0x2124,
+				       "Defer processing UNSOL LS req opcode %#x...\n",
+				       p->payload[0]);
+				return;
+			}
+			qla2xxx_process_purls_iocb((void **)&pkt, &rsp);
+			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5042,
@@ -273,7 +273,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
		wait_time = jiffies;
-		atomic_inc(&ha->num_pend_mbx_stage3);
		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
		    mcp->tov * HZ)) {
			ql_dbg(ql_dbg_mbx, vha, 0x117a,

@@ -290,7 +289,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
-				atomic_dec(&ha->num_pend_mbx_stage3);
				rval = QLA_ABORTED;
				goto premature_exit;
			}

@@ -302,11 +300,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
			ha->flags.mbox_busy = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			atomic_dec(&ha->num_pend_mbx_stage2);
-			atomic_dec(&ha->num_pend_mbx_stage3);
			rval = QLA_ABORTED;
			goto premature_exit;
		}
-		atomic_dec(&ha->num_pend_mbx_stage3);
 
		if (time_after(jiffies, wait_time + 5 * HZ))
			ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",

@@ -12,6 +12,26 @@
#include <linux/blk-mq.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;
static int qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha,
struct qla_qpair *qp,
struct qla_nvme_lsrjt_pt_arg *a,
bool is_xchg_terminate);

struct qla_nvme_unsol_ctx {
struct list_head elem;
struct scsi_qla_host *vha;
struct fc_port *fcport;
struct srb *sp;
struct nvmefc_ls_rsp lsrsp;
struct nvmefc_ls_rsp *fd_rsp;
struct work_struct lsrsp_work;
struct work_struct abort_work;
__le32 exchange_address;
__le16 nport_handle;
__le16 ox_id;
int comp_status;
spinlock_t cmd_lock;
};

int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{

@@ -216,6 +236,55 @@ static void qla_nvme_sp_ls_done(srb_t *sp, int res)
schedule_work(&priv->ls_work);
}

static void qla_nvme_release_lsrsp_cmd_kref(struct kref *kref)
{
struct srb *sp = container_of(kref, struct srb, cmd_kref);
struct qla_nvme_unsol_ctx *uctx = sp->priv;
struct nvmefc_ls_rsp *fd_rsp;
unsigned long flags;

if (!uctx) {
qla2x00_rel_sp(sp);
return;
}

spin_lock_irqsave(&uctx->cmd_lock, flags);
uctx->sp = NULL;
sp->priv = NULL;
spin_unlock_irqrestore(&uctx->cmd_lock, flags);

fd_rsp = uctx->fd_rsp;

list_del(&uctx->elem);

fd_rsp->done(fd_rsp);
kfree(uctx);
qla2x00_rel_sp(sp);
}

static void qla_nvme_lsrsp_complete(struct work_struct *work)
{
struct qla_nvme_unsol_ctx *uctx =
container_of(work, struct qla_nvme_unsol_ctx, lsrsp_work);

kref_put(&uctx->sp->cmd_kref, qla_nvme_release_lsrsp_cmd_kref);
}

static void qla_nvme_sp_lsrsp_done(srb_t *sp, int res)
{
struct qla_nvme_unsol_ctx *uctx = sp->priv;

if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
return;

if (res)
res = -EINVAL;

uctx->comp_status = res;
INIT_WORK(&uctx->lsrsp_work, qla_nvme_lsrsp_complete);
schedule_work(&uctx->lsrsp_work);
}

/* it assumed that QPair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{

@@ -288,6 +357,92 @@ out:
kref_put(&sp->cmd_kref, sp->put_fn);
}

static int qla_nvme_xmt_ls_rsp(struct nvme_fc_local_port *lport,
struct nvme_fc_remote_port *rport,
struct nvmefc_ls_rsp *fd_resp)
{
struct qla_nvme_unsol_ctx *uctx = container_of(fd_resp,
struct qla_nvme_unsol_ctx, lsrsp);
struct qla_nvme_rport *qla_rport = rport->private;
fc_port_t *fcport = qla_rport->fcport;
struct scsi_qla_host *vha = uctx->vha;
struct qla_hw_data *ha = vha->hw;
struct qla_nvme_lsrjt_pt_arg a;
struct srb_iocb *nvme;
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
uint8_t cnt = 0;

if (!fcport || fcport->deleted)
goto out;

if (!ha->flags.fw_started)
goto out;

/* Alloc SRB structure */
sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
if (!sp)
goto out;

sp->type = SRB_NVME_LS;
sp->name = "nvme_ls";
sp->done = qla_nvme_sp_lsrsp_done;
sp->put_fn = qla_nvme_release_lsrsp_cmd_kref;
sp->priv = (void *)uctx;
sp->unsol_rsp = 1;
uctx->sp = sp;
spin_lock_init(&uctx->cmd_lock);
nvme = &sp->u.iocb_cmd;
uctx->fd_rsp = fd_resp;
nvme->u.nvme.desc = fd_resp;
nvme->u.nvme.dir = 0;
nvme->u.nvme.dl = 0;
nvme->u.nvme.timeout_sec = 0;
nvme->u.nvme.cmd_dma = fd_resp->rspdma;
nvme->u.nvme.cmd_len = fd_resp->rsplen;
nvme->u.nvme.rsp_len = 0;
nvme->u.nvme.rsp_dma = 0;
nvme->u.nvme.exchange_address = uctx->exchange_address;
nvme->u.nvme.nport_handle = uctx->nport_handle;
nvme->u.nvme.ox_id = uctx->ox_id;
dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
le32_to_cpu(fd_resp->rsplen), DMA_TO_DEVICE);

ql_dbg(ql_dbg_unsol, vha, 0x2122,
"Unsol lsreq portid=%06x %8phC exchange_address 0x%x ox_id 0x%x hdl 0x%x\n",
fcport->d_id.b24, fcport->port_name, uctx->exchange_address,
uctx->ox_id, uctx->nport_handle);
retry:
rval = qla2x00_start_sp(sp);
switch (rval) {
case QLA_SUCCESS:
break;
case EAGAIN:
msleep(PURLS_MSLEEP_INTERVAL);
cnt++;
if (cnt < PURLS_RETRY_COUNT)
goto retry;

fallthrough;
default:
ql_dbg(ql_log_warn, vha, 0x2123,
"Failed to xmit Unsol ls response = %d\n", rval);
rval = -EIO;
qla2x00_rel_sp(sp);
goto out;
}

return 0;
out:
memset((void *)&a, 0, sizeof(a));
a.vp_idx = vha->vp_idx;
a.nport_handle = uctx->nport_handle;
a.xchg_address = uctx->exchange_address;
qla_nvme_ls_reject_iocb(vha, ha->base_qpair, &a, true);
kfree(uctx);
return rval;
}

static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{

@@ -355,7 +510,7 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
nvme->u.nvme.timeout_sec = fd->timeout;
nvme->u.nvme.cmd_dma = fd->rqstdma;
dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
fd->rqstlen, DMA_TO_DEVICE);
le32_to_cpu(fd->rqstlen), DMA_TO_DEVICE);

rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {

@@ -720,6 +875,7 @@ static struct nvme_fc_port_template qla_nvme_fc_transport = {
.ls_abort = qla_nvme_ls_abort,
.fcp_io = qla_nvme_post_cmd,
.fcp_abort = qla_nvme_fcp_abort,
.xmt_ls_rsp = qla_nvme_xmt_ls_rsp,
.map_queues = qla_nvme_map_queues,
.max_hw_queues = DEF_NVME_HW_QUEUES,
.max_sgl_segments = 1024,

@@ -924,3 +1080,247 @@ inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
return;
kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
}

static void qla_nvme_fc_format_rjt(void *buf, u8 ls_cmd, u8 reason,
u8 explanation, u8 vendor)
{
struct fcnvme_ls_rjt *rjt = buf;

rjt->w0.ls_cmd = FCNVME_LSDESC_RQST;
rjt->desc_list_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt));
rjt->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
rjt->rqst.desc_len =
fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
rjt->rqst.w0.ls_cmd = ls_cmd;
rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
rjt->rjt.reason_code = reason;
rjt->rjt.reason_explanation = explanation;
rjt->rjt.vendor = vendor;
}

static void qla_nvme_lsrjt_pt_iocb(struct scsi_qla_host *vha,
struct pt_ls4_request *lsrjt_iocb,
struct qla_nvme_lsrjt_pt_arg *a)
{
lsrjt_iocb->entry_type = PT_LS4_REQUEST;
lsrjt_iocb->entry_count = 1;
lsrjt_iocb->sys_define = 0;
lsrjt_iocb->entry_status = 0;
lsrjt_iocb->handle = QLA_SKIP_HANDLE;
lsrjt_iocb->nport_handle = a->nport_handle;
lsrjt_iocb->exchange_address = a->xchg_address;
lsrjt_iocb->vp_index = a->vp_idx;

lsrjt_iocb->control_flags = cpu_to_le16(a->control_flags);

put_unaligned_le64(a->tx_addr, &lsrjt_iocb->dsd[0].address);
lsrjt_iocb->dsd[0].length = cpu_to_le32(a->tx_byte_count);
lsrjt_iocb->tx_dseg_count = cpu_to_le16(1);
lsrjt_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count);

put_unaligned_le64(a->rx_addr, &lsrjt_iocb->dsd[1].address);
lsrjt_iocb->dsd[1].length = 0;
lsrjt_iocb->rx_dseg_count = 0;
lsrjt_iocb->rx_byte_count = 0;
}

static int
qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha, struct qla_qpair *qp,
struct qla_nvme_lsrjt_pt_arg *a, bool is_xchg_terminate)
{
struct pt_ls4_request *lsrjt_iocb;

lsrjt_iocb = __qla2x00_alloc_iocbs(qp, NULL);
if (!lsrjt_iocb) {
ql_log(ql_log_warn, vha, 0x210e,
"qla2x00_alloc_iocbs failed.\n");
return QLA_FUNCTION_FAILED;
}

if (!is_xchg_terminate) {
qla_nvme_fc_format_rjt((void *)vha->hw->lsrjt.c, a->opcode,
a->reason, a->explanation, 0);

a->tx_byte_count = sizeof(struct fcnvme_ls_rjt);
a->tx_addr = vha->hw->lsrjt.cdma;
a->control_flags = CF_LS4_RESPONDER << CF_LS4_SHIFT;

ql_dbg(ql_dbg_unsol, vha, 0x211f,
"Sending nvme fc ls reject ox_id %04x op %04x\n",
a->ox_id, a->opcode);
ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x210f,
vha->hw->lsrjt.c, sizeof(*vha->hw->lsrjt.c));
} else {
a->tx_byte_count = 0;
a->control_flags = CF_LS4_RESPONDER_TERM << CF_LS4_SHIFT;
ql_dbg(ql_dbg_unsol, vha, 0x2110,
"Terminate nvme ls xchg 0x%x\n", a->xchg_address);
}

qla_nvme_lsrjt_pt_iocb(vha, lsrjt_iocb, a);
/* flush iocb to mem before notifying hw doorbell */
wmb();
qla2x00_start_iocbs(vha, qp->req);
return 0;
}

/*
* qla2xxx_process_purls_pkt() - Pass-up Unsolicited
* Received FC-NVMe Link Service pkt to nvme_fc_rcv_ls_req().
* LLDD need to provide memory for response buffer, which
* will be used to reference the exchange corresponding
* to the LS when issuing an ls response. LLDD will have to free
* response buffer in lport->ops->xmt_ls_rsp().
*
* @vha: SCSI qla host
* @item: ptr to purex_item
*/
static void
qla2xxx_process_purls_pkt(struct scsi_qla_host *vha, struct purex_item *item)
{
struct qla_nvme_unsol_ctx *uctx = item->purls_context;
struct qla_nvme_lsrjt_pt_arg a;
int ret = 1;

#if (IS_ENABLED(CONFIG_NVME_FC))
ret = nvme_fc_rcv_ls_req(uctx->fcport->nvme_remote_port, &uctx->lsrsp,
&item->iocb, item->size);
#endif
if (ret) {
ql_dbg(ql_dbg_unsol, vha, 0x2125, "NVMe transport ls_req failed\n");
memset((void *)&a, 0, sizeof(a));
a.vp_idx = vha->vp_idx;
a.nport_handle = uctx->nport_handle;
a.xchg_address = uctx->exchange_address;
qla_nvme_ls_reject_iocb(vha, vha->hw->base_qpair, &a, true);
list_del(&uctx->elem);
kfree(uctx);
}
}

static scsi_qla_host_t *
qla2xxx_get_vha_from_vp_idx(struct qla_hw_data *ha, uint16_t vp_index)
{
scsi_qla_host_t *base_vha, *vha, *tvp;
unsigned long flags;

base_vha = pci_get_drvdata(ha->pdev);

if (!vp_index && !ha->num_vhosts)
return base_vha;

spin_lock_irqsave(&ha->vport_slock, flags);
list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) {
if (vha->vp_idx == vp_index) {
spin_unlock_irqrestore(&ha->vport_slock, flags);
return vha;
}
}
spin_unlock_irqrestore(&ha->vport_slock, flags);

return NULL;
}

void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp)
{
struct nvme_fc_remote_port *rport;
struct qla_nvme_rport *qla_rport;
struct qla_nvme_lsrjt_pt_arg a;
struct pt_ls4_rx_unsol *p = *pkt;
struct qla_nvme_unsol_ctx *uctx;
struct rsp_que *rsp_q = *rsp;
struct qla_hw_data *ha;
scsi_qla_host_t *vha;
fc_port_t *fcport = NULL;
struct purex_item *item;
port_id_t d_id = {0};
port_id_t id = {0};
u8 *opcode;
bool xmt_reject = false;

ha = rsp_q->hw;

vha = qla2xxx_get_vha_from_vp_idx(ha, p->vp_index);
if (!vha) {
ql_log(ql_log_warn, NULL, 0x2110, "Invalid vp index %d\n", p->vp_index);
WARN_ON_ONCE(1);
return;
}

memset((void *)&a, 0, sizeof(a));
opcode = (u8 *)&p->payload[0];
a.opcode = opcode[3];
a.vp_idx = p->vp_index;
a.nport_handle = p->nport_handle;
a.ox_id = p->ox_id;
a.xchg_address = p->exchange_address;

id.b.domain = p->s_id.domain;
id.b.area = p->s_id.area;
id.b.al_pa = p->s_id.al_pa;
d_id.b.domain = p->d_id[2];
d_id.b.area = p->d_id[1];
d_id.b.al_pa = p->d_id[0];

fcport = qla2x00_find_fcport_by_nportid(vha, &id, 0);
if (!fcport) {
ql_dbg(ql_dbg_unsol, vha, 0x211e,
"Failed to find sid=%06x did=%06x\n",
id.b24, d_id.b24);
a.reason = FCNVME_RJT_RC_INV_ASSOC;
a.explanation = FCNVME_RJT_EXP_NONE;
xmt_reject = true;
goto out;
}
rport = fcport->nvme_remote_port;
qla_rport = rport->private;

item = qla27xx_copy_multiple_pkt(vha, pkt, rsp, true, false);
if (!item) {
a.reason = FCNVME_RJT_RC_LOGIC;
a.explanation = FCNVME_RJT_EXP_NONE;
xmt_reject = true;
goto out;
}

uctx = kzalloc(sizeof(*uctx), GFP_ATOMIC);
if (!uctx) {
ql_log(ql_log_info, vha, 0x2126, "Failed allocate memory\n");
a.reason = FCNVME_RJT_RC_LOGIC;
a.explanation = FCNVME_RJT_EXP_NONE;
xmt_reject = true;
kfree(item);
goto out;
}

uctx->vha = vha;
uctx->fcport = fcport;
uctx->exchange_address = p->exchange_address;
uctx->nport_handle = p->nport_handle;
uctx->ox_id = p->ox_id;
qla_rport->uctx = uctx;
INIT_LIST_HEAD(&uctx->elem);
list_add_tail(&uctx->elem, &fcport->unsol_ctx_head);
item->purls_context = (void *)uctx;

ql_dbg(ql_dbg_unsol, vha, 0x2121,
"PURLS OP[%01x] size %d xchg addr 0x%x portid %06x\n",
item->iocb.iocb[3], item->size, uctx->exchange_address,
fcport->d_id.b24);
/* +48 0 1 2 3 4 5 6 7 8 9 A B C D E F
* ----- -----------------------------------------------
* 0000: 00 00 00 05 28 00 00 00 07 00 00 00 08 00 00 00
* 0010: ab ec 0f cc 00 00 8d 7d 05 00 00 00 10 00 00 00
* 0020: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
*/
ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x2120,
&item->iocb, item->size);

qla24xx_queue_purex_item(vha, item, qla2xxx_process_purls_pkt);
out:
if (xmt_reject) {
qla_nvme_ls_reject_iocb(vha, (*rsp)->qpair, &a, false);
__qla_consume_iocb(vha, pkt, rsp);
}
}

@@ -21,6 +21,7 @@
#define Q2T_NVME_NUM_TAGS 2048
#define QLA_MAX_FC_SEGMENTS 64

struct qla_nvme_unsol_ctx;
struct scsi_qla_host;
struct qla_hw_data;
struct req_que;

@@ -37,6 +38,7 @@ struct nvme_private {

struct qla_nvme_rport {
struct fc_port *fcport;
struct qla_nvme_unsol_ctx *uctx;
};

#define COMMAND_NVME 0x88 /* Command Type FC-NVMe IOCB */

@@ -75,6 +77,9 @@ struct cmd_nvme {
struct dsd64 nvme_dsd;
};

#define PURLS_MSLEEP_INTERVAL 1
#define PURLS_RETRY_COUNT 5

#define PT_LS4_REQUEST 0x89 /* Link Service pass-through IOCB (request) */
struct pt_ls4_request {
uint8_t entry_type;

@@ -118,21 +123,19 @@ struct pt_ls4_rx_unsol {
__le32 exchange_address;
uint8_t d_id[3];
uint8_t r_ctl;
be_id_t s_id;
le_id_t s_id;
uint8_t cs_ctl;
uint8_t f_ctl[3];
uint8_t type;
__le16 seq_cnt;
uint8_t df_ctl;
uint8_t seq_id;
__le16 rx_id;
__le16 ox_id;
__le32 param;
__le32 desc0;
__le16 rx_id;
__le16 ox_id;
__le32 desc0;
#define PT_LS4_PAYLOAD_OFFSET 0x2c
#define PT_LS4_FIRST_PACKET_LEN 20
__le32 desc_len;
__le32 payload[3];
__le32 payload[5];
};

/*

@@ -857,7 +857,9 @@ struct fcp_cmnd {
uint8_t task_attribute;
uint8_t task_management;
uint8_t additional_cdb_len;
uint8_t cdb[260]; /* 256 for CDB len and 4 for FCP_DL */
#define QLA_CDB_BUF_SIZE 256
#define QLA_FCP_DL_SIZE 4
uint8_t cdb[QLA_CDB_BUF_SIZE + QLA_FCP_DL_SIZE]; /* 256 for CDB len and 4 for FCP_DL */
};

struct dsd_dma {

@@ -44,10 +44,11 @@ module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
"Set this to take full dump on MPI hang.");

int ql2xenforce_iocb_limit = 1;
int ql2xenforce_iocb_limit = 2;
module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xenforce_iocb_limit,
"Enforce IOCB throttling, to avoid FW congestion. (default: 1)");
"Enforce IOCB throttling, to avoid FW congestion. (default: 2) "
"1: track usage per queue, 2: track usage per adapter");

/*
* CT6 CTX allocation cache

@@ -432,6 +433,7 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
ha->base_qpair->srb_mempool = ha->srb_mempool;
INIT_LIST_HEAD(&ha->base_qpair->hints_list);
INIT_LIST_HEAD(&ha->base_qpair->dsd_list);
ha->base_qpair->enable_class_2 = ql2xenableclass2;
/* init qpair to this cpu. Will adjust at run time. */
qla_cpu_update(rsp->qpair, raw_smp_processor_id());

@@ -750,9 +752,9 @@ void qla2x00_sp_free_dma(srb_t *sp)

dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
ctx1->fcp_cmnd_dma);
list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
list_splice(&ctx1->dsd_list, &sp->qpair->dsd_list);
sp->qpair->dsd_inuse -= ctx1->dsd_use_cnt;
sp->qpair->dsd_avail += ctx1->dsd_use_cnt;
}

if (sp->flags & SRB_GOT_BUF)

@@ -836,9 +838,9 @@ void qla2xxx_qpair_sp_free_dma(srb_t *sp)

dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
ctx1->fcp_cmnd_dma);
list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
list_splice(&ctx1->dsd_list, &sp->qpair->dsd_list);
sp->qpair->dsd_inuse -= ctx1->dsd_use_cnt;
sp->qpair->dsd_avail += ctx1->dsd_use_cnt;
sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
}

@@ -3007,7 +3009,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->max_exchg = FW_MAX_EXCHANGES_CNT;
atomic_set(&ha->num_pend_mbx_stage1, 0);
atomic_set(&ha->num_pend_mbx_stage2, 0);
atomic_set(&ha->num_pend_mbx_stage3, 0);
atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD);
ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD;
INIT_LIST_HEAD(&ha->tmf_pending);

@@ -3288,6 +3289,13 @@
host->max_id = ha->max_fibre_devices;
host->cmd_per_lun = 3;
host->unique_id = host->host_no;

if (ql2xenabledif && ql2xenabledif != 2) {
ql_log(ql_log_warn, base_vha, 0x302d,
"Invalid value for ql2xenabledif, resetting it to default (2)\n");
ql2xenabledif = 2;
}

if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
host->max_cmd_len = 32;
else

@@ -3524,8 +3532,6 @@ skip_dpc:
base_vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_init, base_vha, 0x00f1,
"Registering for DIF/DIX type 1 and 3 protection.\n");
if (ql2xenabledif == 1)
prot = SHOST_DIX_TYPE0_PROTECTION;
if (ql2xprotmask)
scsi_host_set_prot(host, ql2xprotmask);
else

@@ -4402,7 +4408,6 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
"sf_init_cb=%p.\n", ha->sf_init_cb);
}

INIT_LIST_HEAD(&ha->gbl_dsd_list);

/* Get consistent memory allocated for Async Port-Database. */
if (!IS_FWI2_CAPABLE(ha)) {

@@ -4457,8 +4462,9 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,

ha->elsrej.size = sizeof(struct fc_els_ls_rjt) + 16;
ha->elsrej.c = dma_alloc_coherent(&ha->pdev->dev,
ha->elsrej.size, &ha->elsrej.cdma, GFP_KERNEL);

ha->elsrej.size,
&ha->elsrej.cdma,
GFP_KERNEL);
if (!ha->elsrej.c) {
ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff,
"Alloc failed for els reject cmd.\n");

@@ -4467,8 +4473,21 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ha->elsrej.c->er_cmd = ELS_LS_RJT;
ha->elsrej.c->er_reason = ELS_RJT_LOGIC;
ha->elsrej.c->er_explan = ELS_EXPL_UNAB_DATA;

ha->lsrjt.size = sizeof(struct fcnvme_ls_rjt);
ha->lsrjt.c = dma_alloc_coherent(&ha->pdev->dev, ha->lsrjt.size,
&ha->lsrjt.cdma, GFP_KERNEL);
if (!ha->lsrjt.c) {
ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff,
"Alloc failed for nvme fc reject cmd.\n");
goto fail_lsrjt;
}

return 0;

fail_lsrjt:
dma_free_coherent(&ha->pdev->dev, ha->elsrej.size,
ha->elsrej.c, ha->elsrej.cdma);
fail_elsrej:
dma_pool_destroy(ha->purex_dma_pool);
fail_flt:

@@ -4934,18 +4953,16 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->gid_list = NULL;
ha->gid_list_dma = 0;

if (IS_QLA82XX(ha)) {
if (!list_empty(&ha->gbl_dsd_list)) {
struct dsd_dma *dsd_ptr, *tdsd_ptr;
if (!list_empty(&ha->base_qpair->dsd_list)) {
struct dsd_dma *dsd_ptr, *tdsd_ptr;

/* clean up allocated prev pool */
list_for_each_entry_safe(dsd_ptr,
tdsd_ptr, &ha->gbl_dsd_list, list) {
dma_pool_free(ha->dl_dma_pool,
dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
list_del(&dsd_ptr->list);
kfree(dsd_ptr);
}
/* clean up allocated prev pool */
list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
&ha->base_qpair->dsd_list, list) {
dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
dsd_ptr->dsd_list_dma);
list_del(&dsd_ptr->list);
kfree(dsd_ptr);
}
}

@@ -5000,6 +5017,12 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->elsrej.c = NULL;
}

if (ha->lsrjt.c) {
dma_free_coherent(&ha->pdev->dev, ha->lsrjt.size, ha->lsrjt.c,
ha->lsrjt.cdma);
ha->lsrjt.c = NULL;
}

ha->init_cb = NULL;
ha->init_cb_dma = 0;

@@ -6,9 +6,9 @@
/*
* Driver version
*/
#define QLA2XXX_VERSION "10.02.08.500-k"
#define QLA2XXX_VERSION "10.02.09.100-k"

#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
#define QLA_DRIVER_PATCH_VER 8
#define QLA_DRIVER_BETA_VER 500
#define QLA_DRIVER_PATCH_VER 9
#define QLA_DRIVER_BETA_VER 100

@@ -3,6 +3,7 @@
#include <linux/seq_file.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_host.h>
#include "scsi_debugfs.h"

#define SCSI_CMD_FLAG_NAME(name)[const_ilog2(SCMD_##name)] = #name

@@ -33,14 +34,33 @@ static int scsi_flags_show(struct seq_file *m, const unsigned long flags,

void scsi_show_rq(struct seq_file *m, struct request *rq)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq), *cmd2;
struct Scsi_Host *shost = cmd->device->host;
int alloc_ms = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc);
int timeout_ms = jiffies_to_msecs(rq->timeout);
const char *list_info = NULL;
char buf[80] = "(?)";

spin_lock_irq(shost->host_lock);
list_for_each_entry(cmd2, &shost->eh_abort_list, eh_entry) {
if (cmd == cmd2) {
list_info = "on eh_abort_list";
goto unlock;
}
}
list_for_each_entry(cmd2, &shost->eh_cmd_q, eh_entry) {
if (cmd == cmd2) {
list_info = "on eh_cmd_q";
goto unlock;
}
}
unlock:
spin_unlock_irq(shost->host_lock);

__scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len);
seq_printf(m, ", .cmd=%s, .retries=%d, .result = %#x, .flags=", buf,
cmd->retries, cmd->result);
seq_printf(m, ", .cmd=%s, .retries=%d, .allowed=%d, .result = %#x, %s%s.flags=",
buf, cmd->retries, cmd->allowed, cmd->result,
list_info ? : "", list_info ? ", " : "");
scsi_flags_show(m, cmd->flags, scsi_cmd_flags,
ARRAY_SIZE(scsi_cmd_flags));
seq_printf(m, ", .timeout=%d.%03d, allocated %d.%03d s ago",

@@ -2454,7 +2454,7 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
envp[idx++] = "SDEV_MEDIA_CHANGE=1";
break;
case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
scsi_rescan_device(&sdev->sdev_gendev);
scsi_rescan_device(sdev);
envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
break;
case SDEV_EVT_CAPACITY_CHANGE_REPORTED:

@@ -137,7 +137,6 @@ extern int scsi_complete_async_scans(void);
extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
unsigned int, u64, enum scsi_scan_mode);
extern void scsi_forget_host(struct Scsi_Host *);
extern void scsi_rescan_device(struct device *);

/* scsi_sysctl.c */
#ifdef CONFIG_SYSCTL

@@ -1619,9 +1619,9 @@ int scsi_add_device(struct Scsi_Host *host, uint channel,
}
EXPORT_SYMBOL(scsi_add_device);

void scsi_rescan_device(struct device *dev)
void scsi_rescan_device(struct scsi_device *sdev)
{
struct scsi_device *sdev = to_scsi_device(dev);
struct device *dev = &sdev->sdev_gendev;

device_lock(dev);
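
The prototype change above is the heart of the scsi_rescan_device() cleanup: callers now pass the scsi_device itself instead of its embedded struct device, as the caller-side hunks throughout this merge show. A minimal sketch of the new calling convention, where example_rescan() is a hypothetical caller and not part of this series:

/* Hypothetical out-of-tree caller, for illustration only. */
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static void example_rescan(struct scsi_device *sdev)
{
	/* Old convention: scsi_rescan_device(&sdev->sdev_gendev); */
	scsi_rescan_device(sdev);	/* new convention */
}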

@@ -747,7 +747,7 @@ static ssize_t
store_rescan_field (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
scsi_rescan_device(dev);
scsi_rescan_device(to_scsi_device(dev));
return count;
}
static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field);

@@ -840,7 +840,7 @@ store_state_field(struct device *dev, struct device_attribute *attr,
* waiting for pending I/O to finish.
*/
blk_mq_run_hw_queues(sdev->request_queue, true);
scsi_rescan_device(dev);
scsi_rescan_device(sdev);
}

return ret == 0 ? count : -EINVAL;

@@ -104,19 +104,7 @@ static void sd_config_discard(struct scsi_disk *, unsigned int);
static void sd_config_write_same(struct scsi_disk *);
static int sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
static int sd_probe(struct device *);
static int sd_remove(struct device *);
static void sd_shutdown(struct device *);
static int sd_suspend_system(struct device *);
static int sd_suspend_runtime(struct device *);
static int sd_resume_system(struct device *);
static int sd_resume_runtime(struct device *);
static void sd_rescan(struct device *);
static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt);
static void sd_uninit_command(struct scsi_cmnd *SCpnt);
static int sd_done(struct scsi_cmnd *);
static void sd_eh_reset(struct scsi_cmnd *);
static int sd_eh_action(struct scsi_cmnd *, int);
static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
static void scsi_disk_release(struct device *cdev);

@@ -592,33 +580,6 @@ static struct class sd_disk_class = {
.dev_groups = sd_disk_groups,
};

static const struct dev_pm_ops sd_pm_ops = {
.suspend = sd_suspend_system,
.resume = sd_resume_system,
.poweroff = sd_suspend_system,
.restore = sd_resume_system,
.runtime_suspend = sd_suspend_runtime,
.runtime_resume = sd_resume_runtime,
};

static struct scsi_driver sd_template = {
.gendrv = {
.name = "sd",
.owner = THIS_MODULE,
.probe = sd_probe,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.remove = sd_remove,
.shutdown = sd_shutdown,
.pm = &sd_pm_ops,
},
.rescan = sd_rescan,
.init_command = sd_init_command,
.uninit_command = sd_uninit_command,
.done = sd_done,
.eh_action = sd_eh_action,
.eh_reset = sd_eh_reset,
};

/*
* Don't request a new module, as that could deadlock in multipath
* environment.

@@ -3929,6 +3890,33 @@ static int sd_resume_runtime(struct device *dev)
return sd_resume(dev);
}

static const struct dev_pm_ops sd_pm_ops = {
.suspend = sd_suspend_system,
.resume = sd_resume_system,
.poweroff = sd_suspend_system,
.restore = sd_resume_system,
.runtime_suspend = sd_suspend_runtime,
.runtime_resume = sd_resume_runtime,
};

static struct scsi_driver sd_template = {
.gendrv = {
.name = "sd",
.owner = THIS_MODULE,
.probe = sd_probe,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.remove = sd_remove,
.shutdown = sd_shutdown,
.pm = &sd_pm_ops,
},
.rescan = sd_rescan,
.init_command = sd_init_command,
.uninit_command = sd_uninit_command,
.done = sd_done,
.eh_action = sd_eh_action,
.eh_reset = sd_eh_reset,
};

/**
* init_sd - entry point for this driver (both when built in or when
* a module).

@@ -710,7 +710,7 @@ typedef u32 pqi_index_t;
#define SOP_TMF_COMPLETE 0x0
#define SOP_TMF_REJECTED 0x4
#define SOP_TMF_FUNCTION_SUCCEEDED 0x8
#define SOP_RC_INCORRECT_LOGICAL_UNIT 0x9
#define SOP_TMF_INCORRECT_LOGICAL_UNIT 0x9

/* additional CDB bytes usage field codes */
#define SOP_ADDITIONAL_CDB_BYTES_0 0 /* 16-byte CDB */

@@ -1085,7 +1085,16 @@ struct pqi_stream_data {
u32 last_accessed;
};

#define PQI_MAX_LUNS_PER_DEVICE 256
#define PQI_MAX_LUNS_PER_DEVICE 256

struct pqi_tmf_work {
struct work_struct work_struct;
struct scsi_cmnd *scmd;
struct pqi_ctrl_info *ctrl_info;
struct pqi_scsi_dev *device;
u8 lun;
u8 scsi_opcode;
};

struct pqi_scsi_dev {
int devtype; /* as reported by INQUIRY command */

@@ -1111,6 +1120,7 @@ struct pqi_scsi_dev {
u8 erase_in_progress : 1;
bool aio_enabled; /* only valid for physical disks */
bool in_remove;
bool in_reset[PQI_MAX_LUNS_PER_DEVICE];
bool device_offline;
u8 vendor[8]; /* bytes 8-15 of inquiry data */
u8 model[16]; /* bytes 16-31 of inquiry data */

@@ -1149,6 +1159,8 @@ struct pqi_scsi_dev {
struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
atomic_t scsi_cmds_outstanding[PQI_MAX_LUNS_PER_DEVICE];
unsigned int raid_bypass_cnt;

struct pqi_tmf_work tmf_work[PQI_MAX_LUNS_PER_DEVICE];
};

/* VPD inquiry pages */

@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION "2.1.22-040"
#define DRIVER_VERSION "2.1.24-046"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
#define DRIVER_RELEASE 22
#define DRIVER_REVISION 40
#define DRIVER_RELEASE 24
#define DRIVER_REVISION 46

#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"

@@ -48,6 +48,8 @@
#define PQI_POST_RESET_DELAY_SECS 5
#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS 10

#define PQI_NO_COMPLETION ((void *)-1)

MODULE_AUTHOR("Microchip");
MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
DRIVER_VERSION);

@@ -96,6 +98,7 @@ static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
static void pqi_tmf_worker(struct work_struct *work);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1

@@ -455,6 +458,21 @@ static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
return device->in_remove;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device, u8 lun)
{
device->in_reset[lun] = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device, u8 lun)
{
device->in_reset[lun] = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device, u8 lun)
{
return device->in_reset[lun];
}

static inline int pqi_event_type_to_event_index(unsigned int event_type)
{
int index;

@@ -2137,6 +2155,15 @@ static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
return device->sdev != NULL;
}

static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device)
{
unsigned int lun;
struct pqi_tmf_work *tmf_work;

for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++)
INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker);
}

static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{

@@ -2217,6 +2244,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
list_add_tail(&device->add_list_entry, &add_list);
/* To prevent this device structure from being freed later. */
device->keep_device = true;
pqi_init_device_tmf_work(device);
}

spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

@@ -2257,7 +2285,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
device->advertised_queue_depth = device->queue_depth;
scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
if (device->rescan) {
scsi_rescan_device(&device->sdev->sdev_gendev);
scsi_rescan_device(device->sdev);
device->rescan = false;
}
}

@@ -3330,7 +3358,7 @@ static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_inf
case SOP_TMF_REJECTED:
rc = -EAGAIN;
break;
case SOP_RC_INCORRECT_LOGICAL_UNIT:
case SOP_TMF_INCORRECT_LOGICAL_UNIT:
rc = -ENODEV;
break;
default:

@@ -5628,7 +5656,6 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
int rc;
struct pqi_io_request *io_request;
struct pqi_aio_path_request *request;
struct pqi_scsi_dev *device;

io_request = pqi_alloc_io_request(ctrl_info, scmd);
if (!io_request)

@@ -5648,9 +5675,8 @@
request->command_priority = io_high_prio;
put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id;
device = scmd->device->hostdata;
if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
put_unaligned_le64(((scmd->device->lun) << 8), &request->lun_number);
if (!raid_bypass && ctrl_info->multi_lun_device_supported)
put_unaligned_le64(scmd->device->lun << 8, &request->lun_number);
if (cdb_length > sizeof(request->cdb))
cdb_length = sizeof(request->cdb);
request->cdb_length = cdb_length;

@@ -5850,6 +5876,7 @@ static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
{
struct pqi_scsi_dev *device;
struct completion *wait;

if (!scmd->device) {
set_host_byte(scmd, DID_NO_CONNECT);

@@ -5863,6 +5890,10 @@
}

atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);

wait = (struct completion *)xchg(&scmd->host_scribble, NULL);
if (wait != PQI_NO_COMPLETION)
complete(wait);
}

static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,

@@ -5948,6 +5979,9 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
u16 hw_queue;
struct pqi_queue_group *queue_group;
bool raid_bypassed;
u8 lun;

scmd->host_scribble = PQI_NO_COMPLETION;

device = scmd->device->hostdata;

@@ -5957,7 +5991,9 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
return 0;
}

atomic_inc(&device->scsi_cmds_outstanding[scmd->device->lun]);
lun = (u8)scmd->device->lun;

atomic_inc(&device->scsi_cmds_outstanding[lun]);

ctrl_info = shost_to_hba(shost);

@@ -5967,7 +6003,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
return 0;
}

if (pqi_ctrl_blocked(ctrl_info)) {
if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device, lun)) {
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}

@@ -6002,8 +6038,10 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
}

out:
if (rc)
atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
if (rc) {
scmd->host_scribble = NULL;
atomic_dec(&device->scsi_cmds_outstanding[lun]);
}

return rc;
}

@@ -6097,7 +6135,7 @@ static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
}

static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device)
struct pqi_scsi_dev *device, u8 lun)
{
unsigned int i;
unsigned int path;

@@ -6127,6 +6165,9 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
if (scsi_device != device)
continue;

if ((u8)scmd->device->lun != lun)
continue;

list_del(&io_request->request_list_entry);
set_host_byte(scmd, DID_RESET);
pqi_free_io_request(io_request);

@@ -6224,15 +6265,13 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,

#define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30

static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
{
int rc;
struct pqi_io_request *io_request;
DECLARE_COMPLETION_ONSTACK(wait);
struct pqi_task_management_request *request;
struct pqi_scsi_dev *device;

device = scmd->device->hostdata;
io_request = pqi_alloc_io_request(ctrl_info, NULL);
io_request->io_complete_callback = pqi_lun_reset_complete;
io_request->context = &wait;

@@ -6247,7 +6286,7 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd
memcpy(request->lun_number, device->scsi3addr,
sizeof(request->lun_number));
if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
request->ml_device_lun_number = (u8)scmd->device->lun;
request->ml_device_lun_number = lun;
request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
if (ctrl_info->tmf_iu_timeout_supported)
put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);

@@ -6255,7 +6294,7 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd
pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
io_request);

rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, (u8)scmd->device->lun, &wait);
rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, lun, &wait);
if (rc == 0)
rc = io_request->status;

@@ -6269,18 +6308,16 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd
#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000)
#define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000)

static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
{
int reset_rc;
int wait_rc;
unsigned int retries;
unsigned long timeout_msecs;
struct pqi_scsi_dev *device;

device = scmd->device->hostdata;
for (retries = 0;;) {
reset_rc = pqi_lun_reset(ctrl_info, scmd);
if (reset_rc == 0 || reset_rc == -ENODEV || ++retries > PQI_LUN_RESET_RETRIES)
reset_rc = pqi_lun_reset(ctrl_info, device, lun);
if (reset_rc == 0 || reset_rc == -ENODEV || reset_rc == -ENXIO || ++retries > PQI_LUN_RESET_RETRIES)
break;
msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
}

@@ -6288,65 +6325,127 @@ static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct sc
timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;

wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, scmd->device->lun, timeout_msecs);
wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, timeout_msecs);
if (wait_rc && reset_rc == 0)
reset_rc = wait_rc;

return reset_rc == 0 ? SUCCESS : FAILED;
}

static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
{
int rc;
struct pqi_scsi_dev *device;

device = scmd->device->hostdata;
pqi_ctrl_block_requests(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
pqi_fail_io_queued_for_device(ctrl_info, device);
pqi_fail_io_queued_for_device(ctrl_info, device, lun);
rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
pqi_device_reset_start(device, lun);
pqi_ctrl_unblock_requests(ctrl_info);
if (rc)
rc = FAILED;
else
rc = pqi_lun_reset_with_retries(ctrl_info, scmd);
pqi_ctrl_unblock_requests(ctrl_info);
rc = pqi_lun_reset_with_retries(ctrl_info, device, lun);
pqi_device_reset_done(device, lun);

return rc;
}

static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode)
{
int rc;

mutex_lock(&ctrl_info->lun_reset_mutex);

dev_err(&ctrl_info->pci_dev->dev,
"resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n",
ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode);

pqi_check_ctrl_health(ctrl_info);
if (pqi_ctrl_offline(ctrl_info))
rc = FAILED;
else
rc = pqi_device_reset(ctrl_info, device, lun);

dev_err(&ctrl_info->pci_dev->dev,
"reset of scsi %d:%d:%d:%u: %s\n",
ctrl_info->scsi_host->host_no, device->bus, device->target, lun,
rc == SUCCESS ? "SUCCESS" : "FAILED");

mutex_unlock(&ctrl_info->lun_reset_mutex);

return rc;
}

static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
{
int rc;
struct Scsi_Host *shost;
struct pqi_ctrl_info *ctrl_info;
struct pqi_scsi_dev *device;
u8 scsi_opcode;

shost = scmd->device->host;
ctrl_info = shost_to_hba(shost);
device = scmd->device->hostdata;
scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;

return pqi_device_reset_handler(ctrl_info, device, (u8)scmd->device->lun, scmd, scsi_opcode);
}

static void pqi_tmf_worker(struct work_struct *work)
{
struct pqi_tmf_work *tmf_work;
struct scsi_cmnd *scmd;

tmf_work = container_of(work, struct pqi_tmf_work, work_struct);
scmd = (struct scsi_cmnd *)xchg(&tmf_work->scmd, NULL);

pqi_device_reset_handler(tmf_work->ctrl_info, tmf_work->device, tmf_work->lun, scmd, tmf_work->scsi_opcode);
}

static int pqi_eh_abort_handler(struct scsi_cmnd *scmd)
{
struct Scsi_Host *shost;
struct pqi_ctrl_info *ctrl_info;
struct pqi_scsi_dev *device;
struct pqi_tmf_work *tmf_work;
DECLARE_COMPLETION_ONSTACK(wait);

shost = scmd->device->host;
ctrl_info = shost_to_hba(shost);
device = scmd->device->hostdata;

mutex_lock(&ctrl_info->lun_reset_mutex);
dev_err(&ctrl_info->pci_dev->dev,
"attempting TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p\n",
shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);

if (cmpxchg(&scmd->host_scribble, PQI_NO_COMPLETION, (void *)&wait) == NULL) {
dev_err(&ctrl_info->pci_dev->dev,
"scsi %d:%d:%d:%d for SCSI cmd at %p already completed\n",
shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
scmd->result = DID_RESET << 16;
goto out;
}

tmf_work = &device->tmf_work[scmd->device->lun];

if (cmpxchg(&tmf_work->scmd, NULL, scmd) == NULL) {
tmf_work->ctrl_info = ctrl_info;
tmf_work->device = device;
tmf_work->lun = (u8)scmd->device->lun;
tmf_work->scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;
schedule_work(&tmf_work->work_struct);
}

wait_for_completion(&wait);

dev_err(&ctrl_info->pci_dev->dev,
"resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n",
shost->host_no,
device->bus, device->target, (u32)scmd->device->lun,
scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff);
"TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p: SUCCESS\n",
shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);

pqi_check_ctrl_health(ctrl_info);
if (pqi_ctrl_offline(ctrl_info))
rc = FAILED;
else
rc = pqi_device_reset(ctrl_info, scmd);
out:

dev_err(&ctrl_info->pci_dev->dev,
"reset of scsi %d:%d:%d:%d: %s\n",
shost->host_no, device->bus, device->target, (u32)scmd->device->lun,
rc == SUCCESS ? "SUCCESS" : "FAILED");

mutex_unlock(&ctrl_info->lun_reset_mutex);

return rc;
return SUCCESS;
}

static int pqi_slave_alloc(struct scsi_device *sdev)

@@ -6470,21 +6569,21 @@ static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *ar
struct pci_dev *pci_dev;
u32 subsystem_vendor;
u32 subsystem_device;
cciss_pci_info_struct pciinfo;
cciss_pci_info_struct pci_info;

if (!arg)
return -EINVAL;

pci_dev = ctrl_info->pci_dev;

pciinfo.domain = pci_domain_nr(pci_dev->bus);
pciinfo.bus = pci_dev->bus->number;
pciinfo.dev_fn = pci_dev->devfn;
pci_info.domain = pci_domain_nr(pci_dev->bus);
pci_info.bus = pci_dev->bus->number;
pci_info.dev_fn = pci_dev->devfn;
subsystem_vendor = pci_dev->subsystem_vendor;
subsystem_device = pci_dev->subsystem_device;
pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
pci_info.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;

if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
if (copy_to_user(arg, &pci_info, sizeof(pci_info)))
return -EFAULT;

return 0;

@@ -7362,6 +7461,7 @@ static const struct scsi_host_template pqi_driver_template = {
.scan_finished = pqi_scan_finished,
.this_id = -1,
.eh_device_reset_handler = pqi_eh_device_reset_handler,
.eh_abort_handler = pqi_eh_abort_handler,
.ioctl = pqi_ioctl,
.slave_alloc = pqi_slave_alloc,
.slave_configure = pqi_slave_configure,

@@ -8904,6 +9004,52 @@ static void pqi_ctrl_offline_worker(struct work_struct *work)
pqi_take_ctrl_offline_deferred(ctrl_info);
}

static char *pqi_ctrl_shutdown_reason_to_string(enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
{
char *string;

switch (ctrl_shutdown_reason) {
case PQI_IQ_NOT_DRAINED_TIMEOUT:
string = "inbound queue not drained timeout";
break;
case PQI_LUN_RESET_TIMEOUT:
string = "LUN reset timeout";
break;
case PQI_IO_PENDING_POST_LUN_RESET_TIMEOUT:
string = "I/O pending timeout after LUN reset";
break;
case PQI_NO_HEARTBEAT:
string = "no controller heartbeat detected";
break;
case PQI_FIRMWARE_KERNEL_NOT_UP:
string = "firmware kernel not ready";
break;
case PQI_OFA_RESPONSE_TIMEOUT:
string = "OFA response timeout";
break;
case PQI_INVALID_REQ_ID:
string = "invalid request ID";
break;
case PQI_UNMATCHED_REQ_ID:
string = "unmatched request ID";
break;
case PQI_IO_PI_OUT_OF_RANGE:
string = "I/O queue producer index out of range";
break;
case PQI_EVENT_PI_OUT_OF_RANGE:
string = "event queue producer index out of range";
break;
case PQI_UNEXPECTED_IU_TYPE:
string = "unexpected IU type";
break;
default:
string = "unknown reason";
break;
}

return string;
}

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
{

@@ -8916,7 +9062,9 @@ static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
if (!pqi_disable_ctrl_shutdown)
sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
pci_disable_device(ctrl_info->pci_dev);
dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
dev_err(&ctrl_info->pci_dev->dev,
"controller offline: reason code 0x%x (%s)\n",
ctrl_shutdown_reason, pqi_ctrl_shutdown_reason_to_string(ctrl_shutdown_reason));
schedule_work(&ctrl_info->ctrl_offline_work);
}

@@ -9062,7 +9210,7 @@ static void pqi_shutdown(struct pci_dev *pci_dev)
rc = pqi_flush_cache(ctrl_info, shutdown_event);
if (rc)
dev_err(&pci_dev->dev,
"unable to flush controller cache\n");
"unable to flush controller cache during shutdown\n");

pqi_crash_if_pending_command(ctrl_info);
pqi_reset(ctrl_info);
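
The new abort handler above synchronizes with the completion path through scmd->host_scribble: the queue path stamps PQI_NO_COMPLETION, the abort path tries to swap in an on-stack completion, and pqi_prep_for_scsi_done() wakes whichever waiter won the exchange. A stripped-down sketch of that handoff pattern — struct cmd and every name below are invented for illustration and are not the driver's types:

#include <linux/atomic.h>
#include <linux/completion.h>

#define NO_COMPLETION ((void *)-1)

struct cmd { void *token; };	/* stands in for scmd->host_scribble */

static void submit(struct cmd *c)
{
	c->token = NO_COMPLETION;	/* in flight, nobody waiting yet */
}

static void complete_cmd(struct cmd *c)
{
	/* Claim the slot; if an aborter installed a completion, wake it. */
	struct completion *wait = xchg(&c->token, NULL);

	if (wait && wait != (struct completion *)NO_COMPLETION)
		complete(wait);
}

static bool abort_install_waiter(struct cmd *c, struct completion *wait)
{
	/* Succeeds only while the command is still in flight; a NULL
	 * result means the command already completed. */
	return cmpxchg(&c->token, NO_COMPLETION, wait) == NO_COMPLETION;
}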

@@ -414,6 +414,8 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
if (cmdstatp->have_sense &&
cmdstatp->sense_hdr.asc == 0 && cmdstatp->sense_hdr.ascq == 0x17)
STp->cleaning_req = 1; /* ASC and ASCQ => cleaning requested */
if (cmdstatp->have_sense && scode == UNIT_ATTENTION && cmdstatp->sense_hdr.asc == 0x29)
STp->pos_unknown = 1; /* ASC => power on / reset */

STp->pos_unknown |= STp->device->was_reset;

@@ -475,7 +475,7 @@ static void storvsc_device_scan(struct work_struct *work)
sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
if (!sdev)
goto done;
scsi_rescan_device(&sdev->sdev_gendev);
scsi_rescan_device(sdev);
scsi_device_put(sdev);

done:

@@ -325,7 +325,7 @@ static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
/* Handle "Parameters changed", "Mode parameters changed", and
"Capacity data has changed". */
if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
scsi_rescan_device(&sdev->sdev_gendev);
scsi_rescan_device(sdev);

scsi_device_put(sdev);
}

@@ -743,7 +743,7 @@ static int scsifront_sdev_configure(struct scsi_device *sdev)
if (info->host_active == STATE_ERROR)
return -EIO;

if (info && current == info->curr) {
if (current == info->curr) {
err = xenbus_printf(XBT_NIL, info->dev->nodename,
info->dev_state_path, "%d", XenbusStateConnected);
if (err) {

@@ -761,7 +761,7 @@ static void scsifront_sdev_destroy(struct scsi_device *sdev)
struct vscsifrnt_info *info = shost_priv(sdev->host);
int err;

if (info && current == info->curr) {
if (current == info->curr) {
err = xenbus_printf(XBT_NIL, info->dev->nodename,
info->dev_state_path, "%d", XenbusStateClosed);
if (err)

@@ -903,7 +903,7 @@ static int scsifront_probe(struct xenbus_device *dev,
xenbus_dev_fatal(dev, err, "fail to allocate scsi host");
return err;
}
info = (struct vscsifrnt_info *)host->hostdata;
info = shost_priv(host);

dev_set_drvdata(&dev->dev, info);
info->dev = dev;

@@ -76,8 +76,7 @@ static int ufs_bsg_exec_advanced_rpmb_req(struct ufs_hba *hba, struct bsg_job *j
int ret;
int data_len;

if (hba->ufs_version < ufshci_version(4, 0) || !hba->dev_info.b_advanced_rpmb_en ||
!(hba->capabilities & MASK_EHSLUTRD_SUPPORTED))
if (hba->ufs_version < ufshci_version(4, 0) || !hba->dev_info.b_advanced_rpmb_en)
return -EINVAL;

if (rpmb_request->ehs_req.length != 2 || rpmb_request->ehs_req.ehs_type != 1)

@@ -7240,11 +7240,17 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
/* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;

ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
/*
* According to UFSHCI 4.0 specification page 24, if EHSLUTRDS is 0, host controller takes
* EHS length from CMD UPIU, and SW driver use EHS Length field in CMD UPIU. if it is 1,
* HW controller takes EHS length from UTRD.
*/
if (hba->capabilities & MASK_EHSLUTRD_SUPPORTED)
ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
else
ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 0);

/* update the task tag and LUN in the request upiu */
req_upiu->header.flags = upiu_flags;
req_upiu->header.lun = UFS_UPIU_RPMB_WLUN;
/* update the task tag */
req_upiu->header.task_tag = tag;

/* copy the UPIU(contains CDB) request as it is */
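
The new branch above encodes the UFSHCI 4.0 compatibility rule quoted in the comment. Reduced to its bare decision — a sketch only, with caps standing in for hba->capabilities and utrd_ehs_length() a hypothetical helper:

/* Illustrative only: EHS-length placement for advanced RPMB requests. */
static int utrd_ehs_length(u32 caps)
{
	/* With EHSLUTRDS set the controller reads the EHS length from
	 * the UTRD (2 for advanced RPMB); otherwise it must stay 0 and
	 * the EHS Length field in the CMD UPIU is used instead. */
	return (caps & MASK_EHSLUTRD_SUPPORTED) ? 2 : 0;
}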

@@ -53,10 +53,10 @@
struct nvmefc_ls_req {
void *rqstaddr;
dma_addr_t rqstdma;
u32 rqstlen;
__le32 rqstlen;
void *rspaddr;
dma_addr_t rspdma;
u32 rsplen;
__le32 rsplen;
u32 timeout;

void *private;

@@ -120,7 +120,7 @@ struct nvmefc_ls_req {
struct nvmefc_ls_rsp {
void *rspbuf;
dma_addr_t rspdma;
u16 rsplen;
__le32 rsplen;

void (*done)(struct nvmefc_ls_rsp *rsp);
void *nvme_fc_private; /* LLDD is not to access !! */
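
With rqstlen and rsplen now declared __le32, LLDD code must convert before using the lengths on the CPU side — exactly what the qla_nvme.c hunks above do with le32_to_cpu(). A hedged sketch, where ls_req_byte_len() is a hypothetical helper and not part of this series:

#include <linux/nvme-fc-driver.h>

static u32 ls_req_byte_len(const struct nvmefc_ls_req *fd)
{
	return le32_to_cpu(fd->rqstlen);	/* __le32 on the wire-facing side */
}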

@@ -764,7 +764,7 @@ scsi_template_proc_dir(const struct scsi_host_template *sht);
#define scsi_template_proc_dir(sht) NULL
#endif
extern void scsi_scan_host(struct Scsi_Host *);
extern void scsi_rescan_device(struct device *);
extern void scsi_rescan_device(struct scsi_device *);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern int scsi_host_busy(struct Scsi_Host *shost);

@@ -83,7 +83,7 @@ struct utp_upiu_header {
union {
__u8 tm_function;
__u8 query_function;
};
} __attribute__((packed));
__u8 response;
__u8 status;
__u8 ehs_length;