Merge branch '5.20/scsi-queue' into 6.0/scsi-fixes
Include commits that weren't submitted during the 6.0 merge window.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit 27883605cf
@@ -7153,22 +7153,18 @@ static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
 	switch (instance->adapter_type) {
 	case MFI_SERIES:
 		if (megasas_alloc_mfi_ctrl_mem(instance))
-			goto fail;
+			return -ENOMEM;
 		break;
 	case AERO_SERIES:
 	case VENTURA_SERIES:
 	case THUNDERBOLT_SERIES:
 	case INVADER_SERIES:
 		if (megasas_alloc_fusion_context(instance))
-			goto fail;
+			return -ENOMEM;
 		break;
 	}
 
 	return 0;
-fail:
-	kfree(instance->reply_map);
-	instance->reply_map = NULL;
-	return -ENOMEM;
 }
 
 /*
@@ -5310,7 +5310,6 @@ megasas_alloc_fusion_context(struct megasas_instance *instance)
 		if (!fusion->log_to_span) {
 			dev_err(&instance->pdev->dev, "Failed from %s %d\n",
 				__func__, __LINE__);
-			kfree(instance->ctrl_context);
 			return -ENOMEM;
 		}
 	}
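The two megaraid_sas hunks above stop freeing instance->reply_map and instance->ctrl_context inside the allocators themselves and simply return -ENOMEM on failure. Below is a minimal userspace sketch of that shape, assuming (as the change suggests) that cleanup is left to a single unwind path owned by the caller; every name here is a hypothetical stand-in, not a driver function.

/*
 * Sketch: allocation helpers only report -ENOMEM; teardown happens
 * exactly once, in one owning routine, so nothing can be freed twice.
 * Hypothetical names throughout.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	void *ctrl_context;
	void *log_to_span;
};

/* Helpers allocate and report; they never free on failure. */
static int alloc_fusion_context(struct ctx *c)
{
	c->ctrl_context = malloc(256);
	if (!c->ctrl_context)
		return -ENOMEM;
	c->log_to_span = malloc(512);
	if (!c->log_to_span)
		return -ENOMEM;
	return 0;
}

/* Single cleanup routine; free(NULL) is a no-op, so partial
 * allocations are handled naturally. */
static void free_ctrl_mem(struct ctx *c)
{
	free(c->log_to_span);
	free(c->ctrl_context);
	c->log_to_span = NULL;
	c->ctrl_context = NULL;
}

int main(void)
{
	struct ctx c = { 0 };
	int rc = alloc_fusion_context(&c);

	printf("alloc_fusion_context() = %d\n", rc);
	/* The caller unwinds in one place, whether rc is 0 or -ENOMEM. */
	free_ctrl_mem(&c);
	return 0;
}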
@@ -6935,14 +6935,8 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
 
 	if (ha->flags.msix_enabled) {
 		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
-			if (IS_QLA2071(ha)) {
-				/* 4 ports Baker: Enable Interrupt Handshake */
-				icb->msix_atio = 0;
-				icb->firmware_options_2 |= cpu_to_le32(BIT_26);
-			} else {
-				icb->msix_atio = cpu_to_le16(msix->entry);
-				icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
-			}
+			icb->msix_atio = cpu_to_le16(msix->entry);
+			icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
 			ql_dbg(ql_dbg_init, vha, 0xf072,
 			       "Registering ICB vector 0x%x for atio que.\n",
 			       msix->entry);
@@ -111,7 +111,7 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
 	}
 }
 
-static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
+static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs)
 {
 	struct request *rq = scsi_cmd_to_rq(cmd);
 
@@ -121,7 +121,12 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
 	} else {
 		WARN_ON_ONCE(true);
 	}
-	blk_mq_requeue_request(rq, true);
+
+	if (msecs) {
+		blk_mq_requeue_request(rq, false);
+		blk_mq_delay_kick_requeue_list(rq->q, msecs);
+	} else
+		blk_mq_requeue_request(rq, true);
 }
 
 /**
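scsi_mq_requeue_cmd() now takes a delay in milliseconds: with a non-zero value the request is put back without kicking the queue and the requeue list is kicked after the delay, otherwise it is requeued and run immediately. Below is a self-contained sketch of just that control flow; the blk_mq_* calls are replaced by printf stubs, so this models the decision, not the block layer.

/*
 * Standalone sketch of the delayed-requeue decision above. The two
 * "stub_" helpers stand in for the blk-mq calls; they are not the
 * kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

static void stub_requeue_request(const char *rq, bool kick_now)
{
	printf("requeue %s, kick queue now: %s\n", rq, kick_now ? "yes" : "no");
}

static void stub_delay_kick_requeue_list(unsigned long msecs)
{
	printf("kick requeue list after %lu ms\n", msecs);
}

/* Mirrors the new logic: requeue and kick now, or requeue and kick later. */
static void requeue_cmd(const char *rq, unsigned long msecs)
{
	if (msecs) {
		stub_requeue_request(rq, false);
		stub_delay_kick_requeue_list(msecs);
	} else {
		stub_requeue_request(rq, true);
	}
}

int main(void)
{
	requeue_cmd("cmd-A", 0);	/* immediate reprep */
	requeue_cmd("cmd-B", 1000);	/* delayed reprep, e.g. ALUA transition */
	return 0;
}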
@@ -651,14 +656,6 @@ static unsigned int scsi_rq_err_bytes(const struct request *rq)
 	return bytes;
 }
 
-/* Helper for scsi_io_completion() when "reprep" action required. */
-static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
-				      struct request_queue *q)
-{
-	/* A new command will be prepared and issued. */
-	scsi_mq_requeue_cmd(cmd);
-}
-
 static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
 {
 	struct request *req = scsi_cmd_to_rq(cmd);
@@ -676,14 +673,21 @@ static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
 	return false;
 }
 
+/*
+ * When ALUA transition state is returned, reprep the cmd to
+ * use the ALUA handler's transition timeout. Delay the reprep
+ * 1 sec to avoid aggressive retries of the target in that
+ * state.
+ */
+#define ALUA_TRANSITION_REPREP_DELAY	1000
+
 /* Helper for scsi_io_completion() when special action required. */
 static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
 {
 	struct request_queue *q = cmd->device->request_queue;
 	struct request *req = scsi_cmd_to_rq(cmd);
 	int level = 0;
-	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
-	      ACTION_DELAYED_RETRY} action;
+	enum {ACTION_FAIL, ACTION_REPREP, ACTION_DELAYED_REPREP,
+	      ACTION_RETRY, ACTION_DELAYED_RETRY} action;
 	struct scsi_sense_hdr sshdr;
 	bool sense_valid;
 	bool sense_current = true;	/* false implies "deferred sense" */
@@ -772,8 +776,8 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
 					action = ACTION_DELAYED_RETRY;
 					break;
 				case 0x0a: /* ALUA state transition */
-					blk_stat = BLK_STS_TRANSPORT;
-					fallthrough;
+					action = ACTION_DELAYED_REPREP;
+					break;
 				default:
 					action = ACTION_FAIL;
 					break;
@@ -832,7 +836,10 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
 			return;
 		fallthrough;
 	case ACTION_REPREP:
-		scsi_io_completion_reprep(cmd, q);
+		scsi_mq_requeue_cmd(cmd, 0);
 		break;
+	case ACTION_DELAYED_REPREP:
+		scsi_mq_requeue_cmd(cmd, ALUA_TRANSITION_REPREP_DELAY);
+		break;
 	case ACTION_RETRY:
 		/* Retry the same command immediately */
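Taken together with the previous hunks, an ALUA "state transition" sense (the case 0x0a branch above) now selects ACTION_DELAYED_REPREP, which requeues the command with ALUA_TRANSITION_REPREP_DELAY instead of repreparing it immediately. Below is a toy, self-contained model of that sense-to-action mapping; the enum values, helper, and command names are illustrative stand-ins, not scsi_lib.c internals.

/*
 * Toy model: map the additional-sense-code qualifier seen above onto
 * an action, then dispatch REPREP immediately and DELAYED_REPREP with
 * the 1000 ms delay. Hypothetical names throughout.
 */
#include <stdio.h>

#define ALUA_TRANSITION_REPREP_DELAY 1000

enum action { ACTION_FAIL, ACTION_REPREP, ACTION_DELAYED_REPREP };

static void requeue_cmd(const char *cmd, unsigned long msecs)
{
	printf("%s: requeue (delay %lu ms)\n", cmd, msecs);
}

/* Pick an action from the "not ready" qualifier, as in the hunk above. */
static enum action pick_action(unsigned char ascq)
{
	switch (ascq) {
	case 0x0a:	/* ALUA state transition */
		return ACTION_DELAYED_REPREP;
	default:
		return ACTION_FAIL;
	}
}

static void run_action(const char *cmd, enum action a)
{
	switch (a) {
	case ACTION_REPREP:
		requeue_cmd(cmd, 0);
		break;
	case ACTION_DELAYED_REPREP:
		requeue_cmd(cmd, ALUA_TRANSITION_REPREP_DELAY);
		break;
	default:
		printf("%s: fail the request\n", cmd);
		break;
	}
}

int main(void)
{
	run_action("cmd-transitioning", pick_action(0x0a));
	run_action("cmd-other", pick_action(0x02));
	return 0;
}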
@@ -926,7 +933,7 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
  * command block will be released and the queue function will be goosed. If we
  * are not done then we have to figure out what to do next:
  *
- *   a) We can call scsi_io_completion_reprep(). The request will be
+ *   a) We can call scsi_mq_requeue_cmd(). The request will be
  *      unprepared and put back on the queue. Then a new command will
  *      be created for it. This should be used if we made forward
  *      progress, or if we want to switch from READ(10) to READ(6) for
@@ -942,7 +949,6 @@
 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 {
 	int result = cmd->result;
-	struct request_queue *q = cmd->device->request_queue;
 	struct request *req = scsi_cmd_to_rq(cmd);
 	blk_status_t blk_stat = BLK_STS_OK;
 
@@ -979,7 +985,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	 * request just queue the command up again.
 	 */
 	if (likely(result == 0))
-		scsi_io_completion_reprep(cmd, q);
+		scsi_mq_requeue_cmd(cmd, 0);
 	else
 		scsi_io_completion_action(cmd, result);
 }
@@ -2012,7 +2012,7 @@ static int storvsc_probe(struct hv_device *device,
 	 */
 	host_dev->handle_error_wq =
 			alloc_ordered_workqueue("storvsc_error_wq_%d",
-						WQ_MEM_RECLAIM,
+						0,
 						host->host_no);
 	if (!host_dev->handle_error_wq) {
 		ret = -ENOMEM;
@@ -1711,7 +1711,7 @@ static struct exynos_ufs_uic_attr fsd_uic_attr = {
 	.pa_dbg_option_suite	= 0x2E820183,
 };
 
-struct exynos_ufs_drv_data fsd_ufs_drvs = {
+static const struct exynos_ufs_drv_data fsd_ufs_drvs = {
 	.uic_attr		= &fsd_uic_attr,
 	.quirks			= UFSHCD_QUIRK_PRDT_BYTE_GRAN |
 				  UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
@@ -135,11 +135,7 @@ static inline u32 ufshci_version(u32 major, u32 minor)
 
 #define UFSHCD_UIC_MASK		(UIC_COMMAND_COMPL | UFSHCD_UIC_PWR_MASK)
 
-#define UFSHCD_ERROR_MASK	(UIC_ERROR |\
-				DEVICE_FATAL_ERROR |\
-				CONTROLLER_FATAL_ERROR |\
-				SYSTEM_BUS_FATAL_ERROR |\
-				CRYPTO_ENGINE_FATAL_ERROR)
+#define UFSHCD_ERROR_MASK	(UIC_ERROR | INT_FATAL_ERRORS)
 
 #define INT_FATAL_ERRORS	(DEVICE_FATAL_ERROR |\
 				CONTROLLER_FATAL_ERROR |\
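The ufshci.h hunk defines UFSHCD_ERROR_MASK in terms of the existing INT_FATAL_ERRORS composite rather than re-listing each fatal-error bit, so the two masks cannot drift apart. Below is a small standalone illustration of that composition; the bit values are arbitrary placeholders, not the real UFSHCI interrupt layout.

/*
 * Illustration of building one interrupt mask from another so they
 * stay in sync. Bit positions are placeholders only.
 */
#include <stdio.h>

#define UIC_ERROR		(1u << 2)
#define DEVICE_FATAL_ERROR	(1u << 11)
#define CONTROLLER_FATAL_ERROR	(1u << 16)
#define SYSTEM_BUS_FATAL_ERROR	(1u << 17)

#define INT_FATAL_ERRORS	(DEVICE_FATAL_ERROR |\
				 CONTROLLER_FATAL_ERROR |\
				 SYSTEM_BUS_FATAL_ERROR)

/* Built from the shared composite instead of re-listing each bit. */
#define ERROR_MASK		(UIC_ERROR | INT_FATAL_ERRORS)

int main(void)
{
	printf("INT_FATAL_ERRORS = 0x%x\n", INT_FATAL_ERRORS);
	printf("ERROR_MASK       = 0x%x\n", ERROR_MASK);
	return 0;
}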