SCSI fixes on 20230916
16 small(ish) fixes, all in drivers. The major ones are in pm8001 (fixes an MSI-X issue going back to the driver's origin), the qla2xxx endianness fix, which fixes a bug on big-endian systems, and the lpfc ones, which can cause an oops on module removal without them.

Signed-off-by: James E.J. Bottomley <jejb@linux.ibm.com>

-----BEGIN PGP SIGNATURE-----

iJwEABMIAEQWIQTnYEDbdso9F2cI+arnQslM7pishQUCZQXNvyYcamFtZXMuYm90
dG9tbGV5QGhhbnNlbnBhcnRuZXJzaGlwLmNvbQAKCRDnQslM7pishRKxAP4kCOBR
UOGLJJdhmotofj9GX0Rr1qAFheJmTSAT6ctAOgD+IWucKDqKfbRLbmzqiSvmWmul
P0STz4VnUKs9LqdLyok=
=3wre
-----END PGP SIGNATURE-----

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
 "16 small(ish) fixes, all in drivers. The major ones are in pm8001
  (fixes an MSI-X issue going back to the driver's origin), the qla2xxx
  endianness fix, which fixes a bug on big-endian systems, and the lpfc
  ones, which can cause an oops on module removal without them"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: lpfc: Prevent use-after-free during rmmod with mapped NVMe rports
  scsi: lpfc: Early return after marking final NLP_DROPPED flag in dev_loss_tmo
  scsi: lpfc: Fix the NULL vs IS_ERR() bug for debugfs_create_file()
  scsi: target: core: Fix target_cmd_counter leak
  scsi: pm8001: Setup IRQs on resume
  scsi: pm80xx: Avoid leaking tags when processing OPC_INB_SET_CONTROLLER_CONFIG command
  scsi: pm80xx: Use phy-specific SAS address when sending PHY_START command
  scsi: ufs: core: Poll HCS.UCRDY before issuing a UIC command
  scsi: ufs: core: Move __ufshcd_send_uic_cmd() outside host_lock
  scsi: qedf: Add synchronization between I/O completions and abort
  scsi: target: Replace strlcpy() with strscpy()
  scsi: qla2xxx: Fix NULL vs IS_ERR() bug for debugfs_create_dir()
  scsi: qla2xxx: Use raw_smp_processor_id() instead of smp_processor_id()
  scsi: qla2xxx: Correct endianness for rqstlen and rsplen
  scsi: ppa: Fix accidentally reversed conditions for 16-bit and 32-bit EPP
  scsi: megaraid_sas: Fix deadlock on firmware crashdump
commit ad8a69f361
[scsi: lpfc: Fix the NULL vs IS_ERR() bug for debugfs_create_file()]

@@ -6073,7 +6073,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 					    phba->hba_debugfs_root,
 					    phba,
 					    &lpfc_debugfs_op_multixripools);
-		if (!phba->debug_multixri_pools) {
+		if (IS_ERR(phba->debug_multixri_pools)) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
 					 "0527 Cannot create debugfs multixripools\n");
 			goto debug_failed;
@@ -6085,7 +6085,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 			debugfs_create_file(name, S_IFREG | 0644,
 					    phba->hba_debugfs_root,
 					    phba, &lpfc_cgn_buffer_op);
-		if (!phba->debug_cgn_buffer) {
+		if (IS_ERR(phba->debug_cgn_buffer)) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
 					 "6527 Cannot create debugfs "
 					 "cgn_buffer\n");
@@ -6098,7 +6098,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 			debugfs_create_file(name, S_IFREG | 0644,
 					    phba->hba_debugfs_root,
 					    phba, &lpfc_rx_monitor_op);
-		if (!phba->debug_rx_monitor) {
+		if (IS_ERR(phba->debug_rx_monitor)) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
 					 "6528 Cannot create debugfs "
 					 "rx_monitor\n");
@@ -6111,7 +6111,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 			debugfs_create_file(name, 0644,
 					    phba->hba_debugfs_root,
 					    phba, &lpfc_debugfs_ras_log);
-		if (!phba->debug_ras_log) {
+		if (IS_ERR(phba->debug_ras_log)) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
 					 "6148 Cannot create debugfs"
 					 " ras_log\n");
@@ -6132,7 +6132,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 			debugfs_create_file(name, S_IFREG | 0644,
 					    phba->hba_debugfs_root,
 					    phba, &lpfc_debugfs_op_lockstat);
-		if (!phba->debug_lockstat) {
+		if (IS_ERR(phba->debug_lockstat)) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
 					 "4610 Can't create debugfs lockstat\n");
 			goto debug_failed;
@@ -6358,7 +6358,7 @@ nvmeio_off:
 			debugfs_create_file(name, 0644,
 					    vport->vport_debugfs_root,
 					    vport, &lpfc_debugfs_op_scsistat);
-		if (!vport->debug_scsistat) {
+		if (IS_ERR(vport->debug_scsistat)) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
 					 "4611 Cannot create debugfs scsistat\n");
 			goto debug_failed;
@@ -6369,7 +6369,7 @@ nvmeio_off:
 			debugfs_create_file(name, 0644,
 					    vport->vport_debugfs_root,
 					    vport, &lpfc_debugfs_op_ioktime);
-		if (!vport->debug_ioktime) {
+		if (IS_ERR(vport->debug_ioktime)) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
 					 "0815 Cannot create debugfs ioktime\n");
 			goto debug_failed;

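Note: debugfs_create_file() and related helpers report failure with an ERR_PTR()-encoded pointer, never NULL, so error paths must test with IS_ERR(). A minimal sketch of the corrected pattern (hypothetical names, not the lpfc code):

    #include <linux/debugfs.h>
    #include <linux/err.h>

    static struct dentry *example_dir;
    static struct dentry *example_file;

    static int example_debugfs_init(const struct file_operations *fops, void *priv)
    {
            example_dir = debugfs_create_dir("example", NULL);
            if (IS_ERR(example_dir))        /* never NULL on failure */
                    return PTR_ERR(example_dir);

            example_file = debugfs_create_file("state", 0444, example_dir,
                                               priv, fops);
            if (IS_ERR(example_file)) {
                    debugfs_remove_recursive(example_dir);
                    return PTR_ERR(example_file);
            }
            return 0;
    }
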
[scsi: lpfc: Early return after marking final NLP_DROPPED flag in dev_loss_tmo / Prevent use-after-free during rmmod with mapped NVMe rports]

@@ -199,11 +199,12 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 	/* Only 1 thread can drop the initial node reference.  If
 	 * another thread has set NLP_DROPPED, this thread is done.
 	 */
-	if (!(ndlp->nlp_flag & NLP_DROPPED)) {
+	if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD) &&
+	    !(ndlp->nlp_flag & NLP_DROPPED)) {
 		ndlp->nlp_flag |= NLP_DROPPED;
 		spin_unlock_irqrestore(&ndlp->lock, iflags);
 		lpfc_nlp_put(ndlp);
-		spin_lock_irqsave(&ndlp->lock, iflags);
+		return;
 	}

 	spin_unlock_irqrestore(&ndlp->lock, iflags);

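The hunk above is an instance of a common "only one thread drops the initial reference" idiom: test-and-set a flag under the object lock, and only the winner performs the final put. A generic sketch of that idiom (illustrative types, not the lpfc structures):

    #include <linux/spinlock.h>
    #include <linux/kref.h>

    struct node {
            spinlock_t lock;
            unsigned long flags;
    #define NODE_DROPPED  0x1
            struct kref kref;
    };

    static void node_release(struct kref *kref) { /* free the node here */ }

    /* Only the first caller to set NODE_DROPPED drops the initial reference. */
    static void node_drop_initial_ref(struct node *n)
    {
            unsigned long iflags;

            spin_lock_irqsave(&n->lock, iflags);
            if (n->flags & NODE_DROPPED) {
                    spin_unlock_irqrestore(&n->lock, iflags);
                    return;
            }
            n->flags |= NODE_DROPPED;
            spin_unlock_irqrestore(&n->lock, iflags);

            kref_put(&n->kref, node_release);   /* drop outside the lock */
    }
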
[scsi: lpfc: Prevent use-after-free during rmmod with mapped NVMe rports]

@@ -228,8 +228,7 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
 	spin_unlock_irq(&ndlp->lock);

 	/* On a devloss timeout event, one more put is executed provided the
-	 * NVME and SCSI rport unregister requests are complete.  If the vport
-	 * is unloading, this extra put is executed by lpfc_drop_node.
+	 * NVME and SCSI rport unregister requests are complete.
 	 */
 	if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags))
 		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
@@ -2567,11 +2566,7 @@ lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
  * nvme_transport perspective.  Loss of an rport just means IO cannot
  * be sent and recovery is completely up to the initator.
  * For now, the driver just unbinds the DID and port_role so that
- * no further IO can be issued.  Changes are planned for later.
- *
- * Notes - the ndlp reference count is not decremented here since
- * since there is no nvme_transport api for devloss.  Node ref count
- * is only adjusted in driver unload.
+ * no further IO can be issued.
  */
 void
 lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
@@ -2646,6 +2641,21 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 				 "6167 NVME unregister failed %d "
 				 "port_state x%x\n",
 				 ret, remoteport->port_state);
+
+		if (vport->load_flag & FC_UNLOADING) {
+			/* Only 1 thread can drop the initial node
+			 * reference. Check if another thread has set
+			 * NLP_DROPPED.
+			 */
+			spin_lock_irq(&ndlp->lock);
+			if (!(ndlp->nlp_flag & NLP_DROPPED)) {
+				ndlp->nlp_flag |= NLP_DROPPED;
+				spin_unlock_irq(&ndlp->lock);
+				lpfc_nlp_put(ndlp);
+				return;
+			}
+			spin_unlock_irq(&ndlp->lock);
+		}
 		}
 	}
 	return;

[scsi: megaraid_sas: Fix deadlock on firmware crashdump]

@@ -2332,7 +2332,7 @@ struct megasas_instance {
 	u32 support_morethan256jbod;	/* FW support for more than 256 PD/JBOD */
 	bool use_seqnum_jbod_fp;	/* Added for PD sequence */
 	bool smp_affinity_enable;
-	spinlock_t crashdump_lock;
+	struct mutex crashdump_lock;

 	struct megasas_register_set __iomem *reg_set;
 	u32 __iomem *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];

@@ -3271,14 +3271,13 @@ fw_crash_buffer_store(struct device *cdev,
 	struct megasas_instance *instance =
 		(struct megasas_instance *) shost->hostdata;
 	int val = 0;
-	unsigned long flags;

 	if (kstrtoint(buf, 0, &val) != 0)
 		return -EINVAL;

-	spin_lock_irqsave(&instance->crashdump_lock, flags);
+	mutex_lock(&instance->crashdump_lock);
 	instance->fw_crash_buffer_offset = val;
-	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+	mutex_unlock(&instance->crashdump_lock);
 	return strlen(buf);
 }

@@ -3293,24 +3292,23 @@ fw_crash_buffer_show(struct device *cdev,
 	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
 	unsigned long chunk_left_bytes;
 	unsigned long src_addr;
-	unsigned long flags;
 	u32 buff_offset;

-	spin_lock_irqsave(&instance->crashdump_lock, flags);
+	mutex_lock(&instance->crashdump_lock);
 	buff_offset = instance->fw_crash_buffer_offset;
 	if (!instance->crash_dump_buf ||
 	   !((instance->fw_crash_state == AVAILABLE) ||
 	     (instance->fw_crash_state == COPYING))) {
 		dev_err(&instance->pdev->dev,
 			"Firmware crash dump is not available\n");
-		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+		mutex_unlock(&instance->crashdump_lock);
 		return -EINVAL;
 	}

 	if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
 		dev_err(&instance->pdev->dev,
 			"Firmware crash dump offset is out of range\n");
-		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+		mutex_unlock(&instance->crashdump_lock);
 		return 0;
 	}

@@ -3322,7 +3320,7 @@ fw_crash_buffer_show(struct device *cdev,
 	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
 		(buff_offset % dmachunk);
 	memcpy(buf, (void *)src_addr, size);
-	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+	mutex_unlock(&instance->crashdump_lock);

 	return size;
 }
@@ -3347,7 +3345,6 @@ fw_crash_state_store(struct device *cdev,
 	struct megasas_instance *instance =
 		(struct megasas_instance *) shost->hostdata;
 	int val = 0;
-	unsigned long flags;

 	if (kstrtoint(buf, 0, &val) != 0)
 		return -EINVAL;
@@ -3361,9 +3358,9 @@ fw_crash_state_store(struct device *cdev,
 	instance->fw_crash_state = val;

 	if ((val == COPIED) || (val == COPY_ERROR)) {
-		spin_lock_irqsave(&instance->crashdump_lock, flags);
+		mutex_lock(&instance->crashdump_lock);
 		megasas_free_host_crash_buffer(instance);
-		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+		mutex_unlock(&instance->crashdump_lock);
 		if (val == COPY_ERROR)
 			dev_info(&instance->pdev->dev, "application failed to "
 				"copy Firmware crash dump\n");
@@ -7422,7 +7419,7 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
 	init_waitqueue_head(&instance->int_cmd_wait_q);
 	init_waitqueue_head(&instance->abort_cmd_wait_q);

-	spin_lock_init(&instance->crashdump_lock);
+	mutex_init(&instance->crashdump_lock);
 	spin_lock_init(&instance->mfi_pool_lock);
 	spin_lock_init(&instance->hba_lock);
 	spin_lock_init(&instance->stream_lock);

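The crashdump_lock conversion works because these are sysfs show/store handlers running in process context; a mutex lets the dump copy and buffer free sleep without holding off interrupts, which a spin_lock_irqsave() region cannot. A minimal sketch of the resulting shape (hypothetical driver fields, not the megaraid_sas code):

    #include <linux/mutex.h>
    #include <linux/device.h>

    struct example_instance {
            struct mutex crash_lock;        /* was a spinlock */
            void *crash_buf;
            size_t crash_len;
    };

    static ssize_t crash_buffer_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
    {
            struct example_instance *inst = dev_get_drvdata(dev);
            ssize_t len;

            mutex_lock(&inst->crash_lock);          /* may sleep: fine in sysfs */
            len = min_t(size_t, inst->crash_len, PAGE_SIZE);
            memcpy(buf, inst->crash_buf, len);
            mutex_unlock(&inst->crash_lock);
            return len;
    }
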
[scsi: pm80xx: Use phy-specific SAS address when sending PHY_START command]

@@ -4180,7 +4180,7 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
 	payload.sas_identify.dev_type = SAS_END_DEVICE;
 	payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
 	memcpy(payload.sas_identify.sas_addr,
-		pm8001_ha->sas_addr, SAS_ADDR_SIZE);
+		&pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE);
 	payload.sas_identify.phy_id = phy_id;

 	return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,

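The change above makes the IDENTIFY data sent with PHY_START carry the per-phy SAS address instead of the controller-wide one. A simplified illustration of the idea, with hypothetical structures rather than the pm8001 ones:

    #include <linux/types.h>
    #include <linux/string.h>

    #define EX_SAS_ADDR_SIZE 8

    struct example_phy  { u8 dev_sas_addr[EX_SAS_ADDR_SIZE]; };
    struct example_hba  { u8 sas_addr[EX_SAS_ADDR_SIZE]; struct example_phy phy[16]; };
    struct example_identify { u8 sas_addr[EX_SAS_ADDR_SIZE]; u8 phy_id; };

    static void fill_identify(struct example_hba *hba, u8 phy_id,
                              struct example_identify *id)
    {
            /* use the address programmed for this phy, not the HBA default */
            memcpy(id->sas_addr, hba->phy[phy_id].dev_sas_addr, EX_SAS_ADDR_SIZE);
            id->phy_id = phy_id;
    }
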
[scsi: pm8001: Setup IRQs on resume]

@@ -273,7 +273,6 @@ static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
 	return ret;
 }

-static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha);
 static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha);

 /**
@@ -294,13 +293,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
 	pm8001_dbg(pm8001_ha, INIT, "pm8001_alloc: PHY:%x\n",
 		   pm8001_ha->chip->n_phy);

-	/* Setup Interrupt */
-	rc = pm8001_setup_irq(pm8001_ha);
-	if (rc) {
-		pm8001_dbg(pm8001_ha, FAIL,
-			   "pm8001_setup_irq failed [ret: %d]\n", rc);
-		goto err_out;
-	}
 	/* Request Interrupt */
 	rc = pm8001_request_irq(pm8001_ha);
 	if (rc)
@@ -1031,47 +1023,38 @@ static u32 pm8001_request_msix(struct pm8001_hba_info *pm8001_ha)
 }
 #endif

-static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha)
-{
-	struct pci_dev *pdev;
-
-	pdev = pm8001_ha->pdev;
-
-#ifdef PM8001_USE_MSIX
-	if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
-		return pm8001_setup_msix(pm8001_ha);
-	pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
-#endif
-	return 0;
-}
-
 /**
  * pm8001_request_irq - register interrupt
  * @pm8001_ha: our ha struct.
  */
 static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
 {
-	struct pci_dev *pdev;
+	struct pci_dev *pdev = pm8001_ha->pdev;
+#ifdef PM8001_USE_MSIX
 	int rc;

-	pdev = pm8001_ha->pdev;
+	if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
+		rc = pm8001_setup_msix(pm8001_ha);
+		if (rc) {
+			pm8001_dbg(pm8001_ha, FAIL,
+				   "pm8001_setup_irq failed [ret: %d]\n", rc);
+			return rc;
+		}

-#ifdef PM8001_USE_MSIX
-	if (pdev->msix_cap && pci_msi_enabled())
-		return pm8001_request_msix(pm8001_ha);
-	else {
-		pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
-		goto intx;
+		if (pdev->msix_cap && pci_msi_enabled())
+			return pm8001_request_msix(pm8001_ha);
 	}
+
+	pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
 #endif

-intx:
 	/* initialize the INT-X interrupt */
 	pm8001_ha->irq_vector[0].irq_id = 0;
 	pm8001_ha->irq_vector[0].drv_inst = pm8001_ha;
-	rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED,
-		pm8001_ha->name, SHOST_TO_SAS_HA(pm8001_ha->shost));
-	return rc;
+
+	return request_irq(pdev->irq, pm8001_interrupt_handler_intx,
+			   IRQF_SHARED, pm8001_ha->name,
+			   SHOST_TO_SAS_HA(pm8001_ha->shost));
 }

 /**

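Folding the MSI-X setup into pm8001_request_irq() matters because the request path is also used on resume; with a separate setup helper called only from probe-time allocation, MSI-X was not reconfigured after suspend. A rough sketch of the combined shape (hypothetical example_* names and helpers, not the driver's exact code):

    #include <linux/pci.h>
    #include <linux/interrupt.h>

    struct example_hba { struct pci_dev *pdev; };

    static int example_setup_msix(struct example_hba *hba);     /* alloc vectors */
    static int example_request_msix(struct example_hba *hba);   /* per-vector handlers */
    static irqreturn_t example_intx_handler(int irq, void *data);

    /*
     * One helper that both probe and resume call: (re)configure MSI-X when the
     * capability is present, otherwise fall back to legacy INT-X.
     */
    static int example_request_irq(struct example_hba *hba)
    {
            struct pci_dev *pdev = hba->pdev;
            int rc;

            if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
                    rc = example_setup_msix(hba);
                    if (rc)
                            return rc;
                    return example_request_msix(hba);
            }

            return request_irq(pdev->irq, example_intx_handler, IRQF_SHARED,
                               "example", hba);
    }
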
[scsi: pm80xx: Avoid leaking tags when processing OPC_INB_SET_CONTROLLER_CONFIG command]

@@ -3671,10 +3671,12 @@ static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
 		(struct set_ctrl_cfg_resp *)(piomb + 4);
 	u32 status = le32_to_cpu(pPayload->status);
 	u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd);
+	u32 tag = le32_to_cpu(pPayload->tag);

 	pm8001_dbg(pm8001_ha, MSG,
 		   "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n",
 		   status, err_qlfr_pgcd);
+	pm8001_tag_free(pm8001_ha, tag);

 	return 0;
 }

[scsi: pm80xx: Use phy-specific SAS address when sending PHY_START command]

@@ -4671,7 +4673,7 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
 	payload.sas_identify.dev_type = SAS_END_DEVICE;
 	payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
 	memcpy(payload.sas_identify.sas_addr,
-		&pm8001_ha->sas_addr, SAS_ADDR_SIZE);
+		&pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE);
 	payload.sas_identify.phy_id = phy_id;

 	return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,

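The added pm8001_tag_free() releases the command tag carried back in the SET CONTROLLER CONFIG response; without it, every such command leaked a tag. The general pattern, shown here with the generic IDA allocator rather than the driver's own tag pool:

    #include <linux/idr.h>

    static DEFINE_IDA(example_tag_ida);

    /* Allocate a tag before posting a command to the controller. */
    static int example_send_cmd(void)
    {
            int tag = ida_alloc_range(&example_tag_ida, 0, 1023, GFP_KERNEL);

            if (tag < 0)
                    return tag;
            /* ... build and post the command, carrying 'tag' ... */
            return 0;
    }

    /* Completion path: every response handler must release the tag it carries,
     * including "status only" responses, or the pool slowly drains.
     */
    static void example_complete_cmd(int tag)
    {
            /* ... process the response payload ... */
            ida_free(&example_tag_ida, tag);
    }
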
[scsi: ppa: Fix accidentally reversed conditions for 16-bit and 32-bit EPP]

@@ -307,9 +307,9 @@ static int ppa_out(ppa_struct *dev, char *buffer, int len)
 	case PPA_EPP_8:
 		epp_reset(ppb);
 		w_ctr(ppb, 0x4);
-		if (dev->mode == PPA_EPP_32 && !(((long) buffer | len) & 0x01))
+		if (dev->mode == PPA_EPP_32 && !(((long) buffer | len) & 0x03))
 			outsl(ppb + 4, buffer, len >> 2);
-		else if (dev->mode == PPA_EPP_16 && !(((long) buffer | len) & 0x03))
+		else if (dev->mode == PPA_EPP_16 && !(((long) buffer | len) & 0x01))
 			outsw(ppb + 4, buffer, len >> 1);
 		else
 			outsb(ppb + 4, buffer, len);

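The two conditions were swapped: 32-bit (outsl) EPP transfers need the buffer address and length to be 4-byte aligned (low two bits clear, mask 0x03), while 16-bit (outsw) transfers only need 2-byte alignment (mask 0x01). A small stand-alone illustration of the corrected checks:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* True if both the address and the length are aligned to 'align' bytes
     * (align must be a power of two). */
    static bool buf_aligned(const void *buffer, size_t len, size_t align)
    {
            return (((uintptr_t)buffer | len) & (align - 1)) == 0;
    }

    /* 32-bit port writes need 4-byte alignment, 16-bit writes need 2-byte. */
    static int pick_epp_width(const void *buffer, size_t len, int mode32, int mode16)
    {
            if (mode32 && buf_aligned(buffer, len, 4))
                    return 32;      /* outsl(), len >> 2 transfers */
            if (mode16 && buf_aligned(buffer, len, 2))
                    return 16;      /* outsw(), len >> 1 transfers */
            return 8;               /* outsb() fallback */
    }
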
[scsi: qedf: Add synchronization between I/O completions and abort]

@@ -1904,6 +1904,7 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
 		goto drop_rdata_kref;
 	}

+	spin_lock_irqsave(&fcport->rport_lock, flags);
 	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
 	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
 	    test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
@@ -1911,17 +1912,20 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
 			  "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
 			  io_req->xid, io_req->sc_cmd);
 		rc = 1;
+		spin_unlock_irqrestore(&fcport->rport_lock, flags);
 		goto drop_rdata_kref;
 	}

+	/* Set the command type to abort */
+	io_req->cmd_type = QEDF_ABTS;
+	spin_unlock_irqrestore(&fcport->rport_lock, flags);
+
 	kref_get(&io_req->refcount);

 	xid = io_req->xid;
 	qedf->control_requests++;
 	qedf->packet_aborts++;

-	/* Set the command type to abort */
-	io_req->cmd_type = QEDF_ABTS;
 	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;

 	set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
@@ -2210,7 +2214,9 @@ process_els:
 		  refcount, fcport, fcport->rdata->ids.port_id);

 	/* Cleanup cmds re-use the same TID as the original I/O */
+	spin_lock_irqsave(&fcport->rport_lock, flags);
 	io_req->cmd_type = QEDF_CLEANUP;
+	spin_unlock_irqrestore(&fcport->rport_lock, flags);
 	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;

 	init_completion(&io_req->cleanup_done);
@@ -2805,6 +2805,8 @@ void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
 	struct qedf_ioreq *io_req;
 	struct qedf_rport *fcport;
 	u32 comp_type;
+	u8 io_comp_type;
+	unsigned long flags;

 	comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
 	    FCOE_CQE_CQE_TYPE_MASK;
@@ -2838,11 +2840,14 @@ void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
 		return;
 	}

+	spin_lock_irqsave(&fcport->rport_lock, flags);
+	io_comp_type = io_req->cmd_type;
+	spin_unlock_irqrestore(&fcport->rport_lock, flags);
+
 	switch (comp_type) {
 	case FCOE_GOOD_COMPLETION_CQE_TYPE:
 		atomic_inc(&fcport->free_sqes);
-		switch (io_req->cmd_type) {
+		switch (io_comp_type) {
 		case QEDF_SCSI_CMD:
 			qedf_scsi_completion(qedf, cqe, io_req);
 			break;

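The qedf change takes fcport->rport_lock around every place that writes or reads io_req->cmd_type, so an abort (which flips the type to QEDF_ABTS) cannot race with the completion path deciding how to process the CQE. A reduced sketch of the snapshot-under-lock idea (illustrative types, not the qedf structures):

    #include <linux/spinlock.h>

    struct example_req   { int cmd_type; };
    struct example_port  { spinlock_t lock; };

    /* Abort path: change the command type only while holding the port lock. */
    static void example_mark_abort(struct example_port *p, struct example_req *req,
                                   int abort_type)
    {
            unsigned long flags;

            spin_lock_irqsave(&p->lock, flags);
            req->cmd_type = abort_type;
            spin_unlock_irqrestore(&p->lock, flags);
    }

    /* Completion path: snapshot the type under the same lock, then act on the
     * local copy so a concurrent abort cannot change it mid-switch. */
    static int example_snapshot_type(struct example_port *p, struct example_req *req)
    {
            unsigned long flags;
            int type;

            spin_lock_irqsave(&p->lock, flags);
            type = req->cmd_type;
            spin_unlock_irqrestore(&p->lock, flags);
            return type;
    }
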
[scsi: qla2xxx: Fix NULL vs IS_ERR() bug for debugfs_create_dir()]

@@ -116,7 +116,7 @@ qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp)

 	sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name));
 	fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root);
-	if (!fp->dfs_rport_dir)
+	if (IS_ERR(fp->dfs_rport_dir))
 		return;
 	if (NVME_TARGET(vha->hw, fp))
 		debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir,
@@ -708,14 +708,14 @@ create_nodes:
 	if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) {
 		ha->tgt.dfs_naqp = debugfs_create_file("naqp",
 		    0400, ha->dfs_dir, vha, &dfs_naqp_ops);
-		if (!ha->tgt.dfs_naqp) {
+		if (IS_ERR(ha->tgt.dfs_naqp)) {
 			ql_log(ql_log_warn, vha, 0xd011,
 			    "Unable to create debugFS naqp node.\n");
 			goto out;
 		}
 	}
 	vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir);
-	if (!vha->dfs_rport_root) {
+	if (IS_ERR(vha->dfs_rport_root)) {
 		ql_log(ql_log_warn, vha, 0xd012,
 		    "Unable to create debugFS rports node.\n");
 		goto out;

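As with the lpfc hunks earlier, debugfs_create_dir() reports failure with an ERR_PTR(), never NULL, so an `if (!dir)` test can never fire; the check has to be IS_ERR(). Minimal sketch (hypothetical names):

    #include <linux/debugfs.h>
    #include <linux/err.h>

    static struct dentry *rports_root;

    static void example_dfs_setup(struct dentry *parent)
    {
            rports_root = debugfs_create_dir("rports", parent);
            if (IS_ERR(rports_root)) {      /* NULL is never returned on error */
                    rports_root = NULL;     /* remember that the node is absent */
                    return;
            }
            /* ... create per-rport files under rports_root ... */
    }
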
[scsi: qla2xxx: Use raw_smp_processor_id() instead of smp_processor_id()]

@@ -577,7 +577,7 @@ fcport_is_bigger(fc_port_t *fcport)
 static inline struct qla_qpair *
 qla_mapq_nvme_select_qpair(struct qla_hw_data *ha, struct qla_qpair *qpair)
 {
-	int cpuid = smp_processor_id();
+	int cpuid = raw_smp_processor_id();

 	if (qpair->cpuid != cpuid &&
 	    ha->qp_cpu_map[cpuid]) {
@@ -3965,7 +3965,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 	if (!ha->flags.fw_started)
 		return;

-	if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
+	if (rsp->qpair->cpuid != raw_smp_processor_id() || !rsp->qpair->rcv_intr) {
 		rsp->qpair->rcv_intr = 1;

 		if (!rsp->qpair->cpu_mapped)
@@ -4468,7 +4468,7 @@ qla2xxx_msix_rsp_q(int irq, void *dev_id)
 	}
 	ha = qpair->hw;

-	queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
+	queue_work(ha->wq, &qpair->q_work);

 	return IRQ_HANDLED;
 }
@@ -4494,7 +4494,7 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
 	wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);

-	queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
+	queue_work(ha->wq, &qpair->q_work);

 	return IRQ_HANDLED;
 }

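smp_processor_id() warns (with CONFIG_DEBUG_PREEMPT) when called from preemptible context, because the task can migrate right after the call. Where the CPU number is only used as a scheduling hint, raw_smp_processor_id() is acceptable, and queueing work on "the current CPU" is what plain queue_work() already prefers, so the queue_work_on(smp_processor_id(), ...) calls could simply be dropped. Sketch of the two idioms (assumed workqueue and work item names):

    #include <linux/workqueue.h>
    #include <linux/smp.h>

    static struct workqueue_struct *example_wq;
    static struct work_struct example_work;

    static void example_queue(void)
    {
            /* Let the workqueue pick a CPU (it prefers the local one); no need
             * to read the CPU id in preemptible context just to pass it in. */
            queue_work(example_wq, &example_work);
    }

    static bool example_on_expected_cpu(int expected_cpu)
    {
            /* Only a hint; safe in preemptible context, the task may migrate. */
            return raw_smp_processor_id() == expected_cpu;
    }
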
[scsi: qla2xxx: Correct endianness for rqstlen and rsplen]

@@ -399,14 +399,14 @@ static int qla_nvme_xmt_ls_rsp(struct nvme_fc_local_port *lport,
 	nvme->u.nvme.dl = 0;
 	nvme->u.nvme.timeout_sec = 0;
 	nvme->u.nvme.cmd_dma = fd_resp->rspdma;
-	nvme->u.nvme.cmd_len = fd_resp->rsplen;
+	nvme->u.nvme.cmd_len = cpu_to_le32(fd_resp->rsplen);
 	nvme->u.nvme.rsp_len = 0;
 	nvme->u.nvme.rsp_dma = 0;
 	nvme->u.nvme.exchange_address = uctx->exchange_address;
 	nvme->u.nvme.nport_handle = uctx->nport_handle;
 	nvme->u.nvme.ox_id = uctx->ox_id;
 	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
-	    le32_to_cpu(fd_resp->rsplen), DMA_TO_DEVICE);
+	    fd_resp->rsplen, DMA_TO_DEVICE);

 	ql_dbg(ql_dbg_unsol, vha, 0x2122,
 	    "Unsol lsreq portid=%06x %8phC exchange_address 0x%x ox_id 0x%x hdl 0x%x\n",
@@ -504,13 +504,13 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
 	nvme->u.nvme.desc = fd;
 	nvme->u.nvme.dir = 0;
 	nvme->u.nvme.dl = 0;
-	nvme->u.nvme.cmd_len = fd->rqstlen;
-	nvme->u.nvme.rsp_len = fd->rsplen;
+	nvme->u.nvme.cmd_len = cpu_to_le32(fd->rqstlen);
+	nvme->u.nvme.rsp_len = cpu_to_le32(fd->rsplen);
 	nvme->u.nvme.rsp_dma = fd->rspdma;
 	nvme->u.nvme.timeout_sec = fd->timeout;
 	nvme->u.nvme.cmd_dma = fd->rqstdma;
 	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
-	    le32_to_cpu(fd->rqstlen), DMA_TO_DEVICE);
+	    fd->rqstlen, DMA_TO_DEVICE);

 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS) {

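The rule the qla2xxx hunks follow: the nvme-fc layer hands the driver native-endian (u32) lengths, while the driver-side fields that end up in the firmware IOCB are little-endian, so the conversion happens exactly once, at the assignment, with cpu_to_le32(); helpers such as dma_sync_single_for_device() keep taking the native value. Tiny sketch of the convention (hypothetical structs):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct fw_ls_iocb {            /* consumed by little-endian firmware */
            __le32 cmd_len;
            __le32 rsp_len;
    };

    struct ls_request {            /* what the upper layer passes in */
            u32 rqstlen;
            u32 rsplen;
    };

    static void fill_iocb(struct fw_ls_iocb *iocb, const struct ls_request *req)
    {
            /* convert once, at the CPU/firmware boundary */
            iocb->cmd_len = cpu_to_le32(req->rqstlen);
            iocb->rsp_len = cpu_to_le32(req->rsplen);
            /* CPU-side users keep reading req->rqstlen / req->rsplen directly,
             * with no le32_to_cpu() needed. */
    }
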
[scsi: qla2xxx: Use raw_smp_processor_id() instead of smp_processor_id(), continued]

@@ -4425,8 +4425,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
 		queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
 	} else if (ha->msix_count) {
 		if (cmd->atio.u.isp24.fcp_cmnd.rddata)
-			queue_work_on(smp_processor_id(), qla_tgt_wq,
-			    &cmd->work);
+			queue_work(qla_tgt_wq, &cmd->work);
 		else
 			queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
 			    &cmd->work);
@@ -310,7 +310,7 @@ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
 	cmd->trc_flags |= TRC_CMD_DONE;

 	INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
-	queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
+	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
 }

 /*
@@ -547,7 +547,7 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
 	cmd->trc_flags |= TRC_DATA_IN;
 	cmd->cmd_in_wq = 1;
 	INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
-	queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
+	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
 }

 static int tcm_qla2xxx_chk_dif_tags(uint32_t tag)

[scsi: target: Replace strlcpy() with strscpy()]

@@ -1392,16 +1392,16 @@ static ssize_t target_wwn_vendor_id_store(struct config_item *item,
 	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
 	unsigned char buf[INQUIRY_VENDOR_LEN + 2];
 	char *stripped = NULL;
-	size_t len;
+	ssize_t len;
 	ssize_t ret;

-	len = strlcpy(buf, page, sizeof(buf));
-	if (len < sizeof(buf)) {
+	len = strscpy(buf, page, sizeof(buf));
+	if (len > 0) {
 		/* Strip any newline added from userspace. */
 		stripped = strstrip(buf);
 		len = strlen(stripped);
 	}
-	if (len > INQUIRY_VENDOR_LEN) {
+	if (len < 0 || len > INQUIRY_VENDOR_LEN) {
 		pr_err("Emulated T10 Vendor Identification exceeds"
 		       " INQUIRY_VENDOR_LEN: " __stringify(INQUIRY_VENDOR_LEN)
 		       "\n");
@@ -1448,16 +1448,16 @@ static ssize_t target_wwn_product_id_store(struct config_item *item,
 	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
 	unsigned char buf[INQUIRY_MODEL_LEN + 2];
 	char *stripped = NULL;
-	size_t len;
+	ssize_t len;
 	ssize_t ret;

-	len = strlcpy(buf, page, sizeof(buf));
-	if (len < sizeof(buf)) {
+	len = strscpy(buf, page, sizeof(buf));
+	if (len > 0) {
 		/* Strip any newline added from userspace. */
 		stripped = strstrip(buf);
 		len = strlen(stripped);
 	}
-	if (len > INQUIRY_MODEL_LEN) {
+	if (len < 0 || len > INQUIRY_MODEL_LEN) {
 		pr_err("Emulated T10 Vendor exceeds INQUIRY_MODEL_LEN: "
 		       __stringify(INQUIRY_MODEL_LEN)
 		       "\n");
@@ -1504,16 +1504,16 @@ static ssize_t target_wwn_revision_store(struct config_item *item,
 	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
 	unsigned char buf[INQUIRY_REVISION_LEN + 2];
 	char *stripped = NULL;
-	size_t len;
+	ssize_t len;
 	ssize_t ret;

-	len = strlcpy(buf, page, sizeof(buf));
-	if (len < sizeof(buf)) {
+	len = strscpy(buf, page, sizeof(buf));
+	if (len > 0) {
 		/* Strip any newline added from userspace. */
 		stripped = strstrip(buf);
 		len = strlen(stripped);
 	}
-	if (len > INQUIRY_REVISION_LEN) {
+	if (len < 0 || len > INQUIRY_REVISION_LEN) {
 		pr_err("Emulated T10 Revision exceeds INQUIRY_REVISION_LEN: "
 		       __stringify(INQUIRY_REVISION_LEN)
 		       "\n");

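The conversion relies on the different return conventions: strlcpy() returns the length of the source string (so overflow was detected by comparing against the buffer size), while strscpy() returns the number of characters copied, or -E2BIG on truncation, which is why len becomes ssize_t and the error check turns into "len < 0 || len > LIMIT". A sketch of the pattern (hypothetical field length and helper name):

    #include <linux/string.h>
    #include <linux/errno.h>
    #include <linux/kernel.h>

    #define EXAMPLE_FIELD_LEN 8

    /* dst is assumed to hold at least EXAMPLE_FIELD_LEN + 1 bytes. */
    static ssize_t example_store_field(char *dst, const char *page)
    {
            /* +2 leaves room for a trailing '\n' plus the NUL terminator */
            char buf[EXAMPLE_FIELD_LEN + 2];
            ssize_t len;

            len = strscpy(buf, page, sizeof(buf));  /* -E2BIG if page didn't fit */
            if (len > 0)
                    len = strlen(strstrip(buf));    /* drop the userspace newline */

            if (len < 0 || len > EXAMPLE_FIELD_LEN)
                    return -EOVERFLOW;

            memcpy(dst, buf, len);
            dst[len] = '\0';
            return len;
    }
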
[scsi: target: core: Fix target_cmd_counter leak]

@@ -264,6 +264,7 @@ void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt)
 		percpu_ref_put(&cmd_cnt->refcnt);

+	percpu_ref_exit(&cmd_cnt->refcnt);
 	kfree(cmd_cnt);
 }
 EXPORT_SYMBOL_GPL(target_free_cmd_counter);

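percpu_ref_init() allocates per-CPU counters that are only released by percpu_ref_exit(); killing the ref and dropping the last reference is not enough, which is why the free path above gained the missing exit call. A minimal lifecycle sketch (simplified, with the shutdown ordering elided to a comment):

    #include <linux/percpu-refcount.h>
    #include <linux/slab.h>

    struct counter {
            struct percpu_ref refcnt;
    };

    static void counter_release(struct percpu_ref *ref) { /* last ref dropped */ }

    static struct counter *counter_create(void)
    {
            struct counter *c = kzalloc(sizeof(*c), GFP_KERNEL);

            if (!c)
                    return NULL;
            if (percpu_ref_init(&c->refcnt, counter_release, 0, GFP_KERNEL)) {
                    kfree(c);
                    return NULL;
            }
            return c;
    }

    static void counter_destroy(struct counter *c)
    {
            percpu_ref_kill(&c->refcnt);    /* switch to atomic mode, drop base ref */
            /* ... wait until counter_release() has run ... */
            percpu_ref_exit(&c->refcnt);    /* frees the per-CPU data: the leaked bit */
            kfree(c);
    }
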
[scsi: ufs: core: Poll HCS.UCRDY before issuing a UIC command; Move __ufshcd_send_uic_cmd() outside host_lock]

@@ -22,6 +22,7 @@
 #include <linux/module.h>
 #include <linux/regulator/consumer.h>
 #include <linux/sched/clock.h>
+#include <linux/iopoll.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_dbg.h>
 #include <scsi/scsi_driver.h>
@@ -2299,7 +2300,11 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
  */
 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
 {
-	return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY;
+	u32 val;
+	int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
+				    500, UIC_CMD_TIMEOUT * 1000, false, hba,
+				    REG_CONTROLLER_STATUS);
+	return ret == 0 ? true : false;
 }

 /**
@@ -2392,7 +2397,6 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
 			 bool completion)
 {
 	lockdep_assert_held(&hba->uic_cmd_mutex);
-	lockdep_assert_held(hba->host->host_lock);

 	if (!ufshcd_ready_for_uic_cmd(hba)) {
 		dev_err(hba->dev,
@@ -2419,7 +2423,6 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 {
 	int ret;
-	unsigned long flags;

 	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
 		return 0;
@@ -2428,9 +2431,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 	mutex_lock(&hba->uic_cmd_mutex);
 	ufshcd_add_delay_before_dme_cmd(hba);

-	spin_lock_irqsave(hba->host->host_lock, flags);
 	ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	if (!ret)
 		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

@@ -4133,8 +4134,8 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 		wmb();
 		reenable_intr = true;
 	}
-	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
 	if (ret) {
 		dev_err(hba->dev,
 			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",

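read_poll_timeout() from <linux/iopoll.h> is the generic helper the UFS hunk switches to: it repeatedly evaluates op(args...), storing the result in val, until cond is true or timeout_us expires, sleeping sleep_us between reads. A small usage sketch against a hypothetical readl-style accessor:

    #include <linux/iopoll.h>
    #include <linux/io.h>
    #include <linux/bits.h>

    #define EXAMPLE_STATUS_REG   0x30
    #define EXAMPLE_READY_BIT    BIT(3)

    struct example_host { void __iomem *mmio; };

    static u32 example_readl(struct example_host *host, unsigned int reg)
    {
            return readl(host->mmio + reg);
    }

    static bool example_wait_ready(struct example_host *host)
    {
            u32 val;
            /* poll every 500us, give up after 500ms; 'false' = do not sleep
             * before the first read */
            int ret = read_poll_timeout(example_readl, val,
                                        val & EXAMPLE_READY_BIT,
                                        500, 500 * 1000, false,
                                        host, EXAMPLE_STATUS_REG);

            return ret == 0;
    }
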
[scsi: qla2xxx: Correct endianness for rqstlen and rsplen (nvme-fc LS length fields)]

@@ -53,10 +53,10 @@
 struct nvmefc_ls_req {
 	void			*rqstaddr;
 	dma_addr_t		rqstdma;
-	__le32			rqstlen;
+	u32			rqstlen;
 	void			*rspaddr;
 	dma_addr_t		rspdma;
-	__le32			rsplen;
+	u32			rsplen;
 	u32			timeout;

 	void			*private;
@@ -120,7 +120,7 @@ struct nvmefc_ls_req {
 struct nvmefc_ls_rsp {
 	void		*rspbuf;
 	dma_addr_t	rspdma;
-	__le32		rsplen;
+	u16		rsplen;

 	void (*done)(struct nvmefc_ls_rsp *rsp);
 	void		*nvme_fc_private;	/* LLDD is not to access !! */