Merge patch series "ufs: core: mcq: Add ufshcd_abort() and error handler support in MCQ mode"
Bao D. Nguyen <quic_nguyenb@quicinc.com> says:

This patch series enables support for ufshcd_abort() and error handler in MCQ mode.

Link: https://lore.kernel.org/r/cover.1685396241.git.quic_nguyenb@quicinc.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit bc5fef0196
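In outline, every abort and cleanup path in the series branches on the queueing mode before touching hardware. The sketch below condenses that dispatch from the ufshcd_abort() and ufshcd_mcq_abort() hunks that follow; the wrapper name is made up for illustration, and the SDB branch omits the doorbell bookkeeping the real functions perform (FAILED/SUCCESS are the SCSI midlayer result codes):

```c
/* Illustrative only: how the series routes an abort by queueing mode. */
static int abort_dispatch_sketch(struct ufs_hba *hba, struct scsi_cmnd *cmd, int tag)
{
	if (is_mcq_enabled(hba))
		/*
		 * MCQ: search the submission queue; if the SQE was never
		 * fetched, nullify it (Command Type = 0xF), otherwise ask
		 * the device to abort via ufshcd_try_to_abort_task().
		 */
		return ufshcd_mcq_abort(cmd);

	/* SDB: legacy single-doorbell handling keyed off UTRLDBR. */
	return ufshcd_try_to_abort_task(hba, tag) ? FAILED : SUCCESS;
}
```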
drivers/ufs/core/ufs-mcq.c

@@ -12,6 +12,10 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include "ufshcd-priv.h"
+#include <linux/delay.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
 
 #define MAX_QUEUE_SUP GENMASK(7, 0)
 #define UFS_MCQ_MIN_RW_QUEUES 2
@@ -27,6 +31,9 @@
 #define MCQ_ENTRY_SIZE_IN_DWORD 8
 #define CQE_UCD_BA GENMASK_ULL(63, 7)
 
+/* Max mcq register polling time in microseconds */
+#define MCQ_POLL_US 500000
+
 static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
 {
 	return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES,
@@ -269,16 +276,38 @@ static int ufshcd_mcq_get_tag(struct ufs_hba *hba,
 }
 
 static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
-					struct ufs_hw_queue *hwq)
+				   struct ufs_hw_queue *hwq)
 {
 	struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
 	int tag = ufshcd_mcq_get_tag(hba, hwq, cqe);
 
-	ufshcd_compl_one_cqe(hba, tag, cqe);
+	if (cqe->command_desc_base_addr) {
+		ufshcd_compl_one_cqe(hba, tag, cqe);
+		/* After processed the cqe, mark it empty (invalid) entry */
+		cqe->command_desc_base_addr = 0;
+	}
 }
 
-unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
-					 struct ufs_hw_queue *hwq)
+void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
+				    struct ufs_hw_queue *hwq)
+{
+	unsigned long flags;
+	u32 entries = hwq->max_entries;
+
+	spin_lock_irqsave(&hwq->cq_lock, flags);
+	while (entries > 0) {
+		ufshcd_mcq_process_cqe(hba, hwq);
+		ufshcd_mcq_inc_cq_head_slot(hwq);
+		entries--;
+	}
+
+	ufshcd_mcq_update_cq_tail_slot(hwq);
+	hwq->cq_head_slot = hwq->cq_tail_slot;
+	spin_unlock_irqrestore(&hwq->cq_lock, flags);
+}
+
+static unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+						struct ufs_hw_queue *hwq)
 {
 	unsigned long completed_reqs = 0;
 
@@ -294,7 +323,6 @@ unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
 
 	return completed_reqs;
 }
-EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_nolock);
 
 unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
 				       struct ufs_hw_queue *hwq)
@@ -307,6 +335,7 @@ unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
 
 	return completed_reqs;
 }
+EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_lock);
 
 void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
 {
@@ -419,6 +448,7 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
 		hwq->max_entries = hba->nutrs;
 		spin_lock_init(&hwq->sq_lock);
 		spin_lock_init(&hwq->cq_lock);
+		mutex_init(&hwq->sq_mutex);
 	}
 
 	/* The very first HW queue serves device commands */
@@ -429,3 +459,222 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
 	host->host_tagset = 1;
 	return 0;
 }
+
+static int ufshcd_mcq_sq_stop(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
+{
+	void __iomem *reg;
+	u32 id = hwq->id, val;
+	int err;
+
+	writel(SQ_STOP, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
+	reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
+	err = read_poll_timeout(readl, val, val & SQ_STS, 20,
+				MCQ_POLL_US, false, reg);
+	if (err)
+		dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
+			__func__, id, err);
+	return err;
+}
+
+static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
+{
+	void __iomem *reg;
+	u32 id = hwq->id, val;
+	int err;
+
+	writel(SQ_START, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
+	reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
+	err = read_poll_timeout(readl, val, !(val & SQ_STS), 20,
+				MCQ_POLL_US, false, reg);
+	if (err)
+		dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
+			__func__, id, err);
+	return err;
+}
+
+/**
+ * ufshcd_mcq_sq_cleanup - Clean up submission queue resources
+ * associated with the pending command.
+ * @hba - per adapter instance.
+ * @task_tag - The command's task tag.
+ *
+ * Returns 0 for success; error code otherwise.
+ */
+int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
+{
+	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+	struct scsi_cmnd *cmd = lrbp->cmd;
+	struct ufs_hw_queue *hwq;
+	void __iomem *reg, *opr_sqd_base;
+	u32 nexus, id, val;
+	int err;
+
+	if (task_tag != hba->nutrs - UFSHCD_NUM_RESERVED) {
+		if (!cmd)
+			return -EINVAL;
+		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+	} else {
+		hwq = hba->dev_cmd_queue;
+	}
+
+	id = hwq->id;
+
+	mutex_lock(&hwq->sq_mutex);
+
+	/* stop the SQ fetching before working on it */
+	err = ufshcd_mcq_sq_stop(hba, hwq);
+	if (err)
+		goto unlock;
+
+	/* SQCTI = EXT_IID, IID, LUN, Task Tag */
+	nexus = lrbp->lun << 8 | task_tag;
+	opr_sqd_base = mcq_opr_base(hba, OPR_SQD, id);
+	writel(nexus, opr_sqd_base + REG_SQCTI);
+
+	/* SQRTCy.ICU = 1 */
+	writel(SQ_ICU, opr_sqd_base + REG_SQRTC);
+
+	/* Poll SQRTSy.CUS = 1. Return result from SQRTSy.RTC */
+	reg = opr_sqd_base + REG_SQRTS;
+	err = read_poll_timeout(readl, val, val & SQ_CUS, 20,
+				MCQ_POLL_US, false, reg);
+	if (err)
+		dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%ld\n",
+			__func__, id, task_tag,
+			FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg)));
+
+	if (ufshcd_mcq_sq_start(hba, hwq))
+		err = -ETIMEDOUT;
+
+unlock:
+	mutex_unlock(&hwq->sq_mutex);
+	return err;
+}
+
+/**
+ * ufshcd_mcq_nullify_sqe - Nullify the submission queue entry.
+ * Write the sqe's Command Type to 0xF. The host controller will not
+ * fetch any sqe with Command Type = 0xF.
+ *
+ * @utrd - UTP Transfer Request Descriptor to be nullified.
+ */
+static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
+{
+	u32 dword_0;
+
+	dword_0 = le32_to_cpu(utrd->header.dword_0);
+	dword_0 &= ~UPIU_COMMAND_TYPE_MASK;
+	dword_0 |= FIELD_PREP(UPIU_COMMAND_TYPE_MASK, 0xF);
+	utrd->header.dword_0 = cpu_to_le32(dword_0);
+}
+
+/**
+ * ufshcd_mcq_sqe_search - Search for the command in the submission queue
+ * If the command is in the submission queue and not issued to the device yet,
+ * nullify the sqe so the host controller will skip fetching the sqe.
+ *
+ * @hba - per adapter instance.
+ * @hwq - Hardware Queue to be searched.
+ * @task_tag - The command's task tag.
+ *
+ * Returns true if the SQE containing the command is present in the SQ
+ * (not fetched by the controller); returns false if the SQE is not in the SQ.
+ */
+static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
+				  struct ufs_hw_queue *hwq, int task_tag)
+{
+	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+	struct utp_transfer_req_desc *utrd;
+	u32 mask = hwq->max_entries - 1;
+	__le64 cmd_desc_base_addr;
+	bool ret = false;
+	u64 addr, match;
+	u32 sq_head_slot;
+
+	mutex_lock(&hwq->sq_mutex);
+
+	ufshcd_mcq_sq_stop(hba, hwq);
+	sq_head_slot = ufshcd_mcq_get_sq_head_slot(hwq);
+	if (sq_head_slot == hwq->sq_tail_slot)
+		goto out;
+
+	cmd_desc_base_addr = lrbp->utr_descriptor_ptr->command_desc_base_addr;
+	addr = le64_to_cpu(cmd_desc_base_addr) & CQE_UCD_BA;
+
+	while (sq_head_slot != hwq->sq_tail_slot) {
+		utrd = hwq->sqe_base_addr +
+			sq_head_slot * sizeof(struct utp_transfer_req_desc);
+		match = le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA;
+		if (addr == match) {
+			ufshcd_mcq_nullify_sqe(utrd);
+			ret = true;
+			goto out;
+		}
+		sq_head_slot = (sq_head_slot + 1) & mask;
+	}
+
+out:
+	ufshcd_mcq_sq_start(hba, hwq);
+	mutex_unlock(&hwq->sq_mutex);
+	return ret;
+}
+
+/**
+ * ufshcd_mcq_abort - Abort the command in MCQ.
+ * @cmd - The command to be aborted.
+ *
+ * Returns SUCCESS or FAILED error codes
+ */
+int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+{
+	struct Scsi_Host *host = cmd->device->host;
+	struct ufs_hba *hba = shost_priv(host);
+	int tag = scsi_cmd_to_rq(cmd)->tag;
+	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+	struct ufs_hw_queue *hwq;
+	int err = FAILED;
+
+	if (!ufshcd_cmd_inflight(lrbp->cmd)) {
+		dev_err(hba->dev,
+			"%s: skip abort. cmd at tag %d already completed.\n",
+			__func__, tag);
+		goto out;
+	}
+
+	/* Skip task abort in case previous aborts failed and report failure */
+	if (lrbp->req_abort_skip) {
+		dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
+			__func__, tag);
+		goto out;
+	}
+
+	hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+
+	if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
+		/*
+		 * Failure. The command should not be "stuck" in SQ for
+		 * a long time which resulted in command being aborted.
+		 */
+		dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
+			__func__, hwq->id, tag);
+		goto out;
+	}
+
+	/*
+	 * The command is not in the submission queue, and it is not
+	 * in the completion queue either. Query the device to see if
+	 * the command is being processed in the device.
+	 */
+	if (ufshcd_try_to_abort_task(hba, tag)) {
+		dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err);
+		lrbp->req_abort_skip = true;
+		goto out;
+	}
+
+	err = SUCCESS;
+	if (ufshcd_cmd_inflight(lrbp->cmd))
+		ufshcd_release_scsi_cmd(hba, lrbp);
+
+out:
+	return err;
+}
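The core trick in ufshcd_mcq_sqe_search() above is a masked ring walk: stop the SQ so the controller's fetch pointer freezes, scan from head to tail comparing each entry's UCD base address (bits 63:7 — the code masks with CQE_UCD_BA because UCDs are 128-byte aligned), and nullify the first match. A minimal userspace model of just the ring walk, with local names rather than kernel API:

```c
/* Standalone sketch of the circular-queue scan; not kernel code. */
#include <stdbool.h>
#include <stdint.h>

#define UCD_BA_MASK 0xffffffffffffff80ULL	/* GENMASK_ULL(63, 7) */

static bool ring_find_and_nullify(uint64_t *slots, uint32_t max_entries,
				  uint32_t head, uint32_t tail, uint64_t addr)
{
	/* like the kernel code, assumes max_entries is a power of two */
	uint32_t mask = max_entries - 1;

	for (uint32_t s = head; s != tail; s = (s + 1) & mask) {
		if ((slots[s] & UCD_BA_MASK) == (addr & UCD_BA_MASK)) {
			slots[s] = 0;	/* stand-in for Command Type = 0xF */
			return true;
		}
	}
	return false;	/* not in the SQ: already fetched or completed */
}
```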
drivers/ufs/core/ufshcd-priv.h

@@ -71,12 +71,18 @@ void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
 void ufshcd_mcq_select_mcq_mode(struct ufs_hba *hba);
 u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
 void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
-unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
-					 struct ufs_hw_queue *hwq);
 struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
 					   struct request *req);
 unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
 					struct ufs_hw_queue *hwq);
+void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
+				    struct ufs_hw_queue *hwq);
+bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd);
+int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag);
+int ufshcd_mcq_abort(struct scsi_cmnd *cmd);
+int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
+void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
+			     struct ufshcd_lrb *lrbp);
 
 #define UFSHCD_MCQ_IO_QUEUE_OFFSET 1
 #define SD_ASCII_STD true
@@ -401,4 +407,12 @@ static inline struct cq_entry *ufshcd_mcq_cur_cqe(struct ufs_hw_queue *q)
 
 	return cqe + q->cq_head_slot;
 }
+
+static inline u32 ufshcd_mcq_get_sq_head_slot(struct ufs_hw_queue *q)
+{
+	u32 val = readl(q->mcq_sq_head);
+
+	return val / sizeof(struct utp_transfer_req_desc);
+}
+
 #endif /* _UFSHCD_PRIV_H_ */
drivers/ufs/core/ufshcd.c

@@ -157,7 +157,6 @@ EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
 enum {
 	UFSHCD_MAX_CHANNEL = 0,
 	UFSHCD_MAX_ID = 1,
-	UFSHCD_NUM_RESERVED = 1,
 	UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED,
 	UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED,
 };
@@ -285,7 +284,6 @@ static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
 					 struct ufs_vreg *vreg);
-static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
 static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
 						 bool enable);
 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
@@ -2953,13 +2951,50 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
 }
 
 /*
- * Clear all the requests from the controller for which a bit has been set in
- * @mask and wait until the controller confirms that these requests have been
- * cleared.
+ * Check with the block layer if the command is inflight
+ * @cmd: command to check.
+ *
+ * Returns true if command is inflight; false if not.
  */
-static int ufshcd_clear_cmds(struct ufs_hba *hba, u32 mask)
+bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
 {
+	struct request *rq;
+
+	if (!cmd)
+		return false;
+
+	rq = scsi_cmd_to_rq(cmd);
+	if (!blk_mq_request_started(rq))
+		return false;
+
+	return true;
+}
+
+/*
+ * Clear the pending command in the controller and wait until
+ * the controller confirms that the command has been cleared.
+ * @hba: per adapter instance
+ * @task_tag: The tag number of the command to be cleared.
+ */
+static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
+{
+	u32 mask = 1U << task_tag;
 	unsigned long flags;
+	int err;
+
+	if (is_mcq_enabled(hba)) {
+		/*
+		 * MCQ mode. Clean up the MCQ resources similar to
+		 * what the ufshcd_utrl_clear() does for SDB mode.
+		 */
+		err = ufshcd_mcq_sq_cleanup(hba, task_tag);
+		if (err) {
+			dev_err(hba->dev, "%s: failed tag=%d. err=%d\n",
+				__func__, task_tag, err);
+			return err;
+		}
+		return 0;
+	}
 
 	/* clear outstanding transaction before retry */
 	spin_lock_irqsave(hba->host->host_lock, flags);
@@ -3060,7 +3095,16 @@ retry:
 			err = -ETIMEDOUT;
 			dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
 				__func__, lrbp->task_tag);
-			if (ufshcd_clear_cmds(hba, 1U << lrbp->task_tag) == 0) {
+
+			/* MCQ mode */
+			if (is_mcq_enabled(hba)) {
+				err = ufshcd_clear_cmd(hba, lrbp->task_tag);
+				hba->dev_cmd.complete = NULL;
+				return err;
+			}
+
+			/* SDB mode */
+			if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) {
 				/* successfully cleared the command, retry if needed */
 				err = -EAGAIN;
 				/*
@@ -3822,10 +3866,8 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
 		/* Configure UTRD with command descriptor base address */
 		cmd_desc_element_addr =
 				(cmd_desc_dma_addr + (cmd_desc_size * i));
-		utrdlp[i].command_desc_base_addr_lo =
-				cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
-		utrdlp[i].command_desc_base_addr_hi =
-				cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
+		utrdlp[i].command_desc_base_addr =
+				cpu_to_le64(cmd_desc_element_addr);
 
 		/* Response upiu and prdt offset should be in double words */
 		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
@@ -5370,8 +5412,8 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 }
 
 /* Release the resources allocated for processing a SCSI command. */
-static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
-				    struct ufshcd_lrb *lrbp)
+void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
+			     struct ufshcd_lrb *lrbp)
 {
 	struct scsi_cmnd *cmd = lrbp->cmd;
 
@@ -5485,6 +5527,57 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
 	return completed_reqs != 0;
 }
 
+/**
+ * ufshcd_mcq_compl_pending_transfer - MCQ mode function. It is
+ * invoked from the error handler context or ufshcd_host_reset_and_restore()
+ * to complete the pending transfers and free the resources associated with
+ * the scsi command.
+ *
+ * @hba: per adapter instance
+ * @force_compl: This flag is set to true when invoked
+ * from ufshcd_host_reset_and_restore() in which case it requires special
+ * handling because the host controller has been reset by ufshcd_hba_stop().
+ */
+static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
+					      bool force_compl)
+{
+	struct ufs_hw_queue *hwq;
+	struct ufshcd_lrb *lrbp;
+	struct scsi_cmnd *cmd;
+	unsigned long flags;
+	u32 hwq_num, utag;
+	int tag;
+
+	for (tag = 0; tag < hba->nutrs; tag++) {
+		lrbp = &hba->lrb[tag];
+		cmd = lrbp->cmd;
+		if (!ufshcd_cmd_inflight(cmd) ||
+		    test_bit(SCMD_STATE_COMPLETE, &cmd->state))
+			continue;
+
+		utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
+		hwq_num = blk_mq_unique_tag_to_hwq(utag);
+		hwq = &hba->uhq[hwq_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
+
+		if (force_compl) {
+			ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
+			/*
+			 * For those cmds of which the cqes are not present
+			 * in the cq, complete them explicitly.
+			 */
+			if (cmd && !test_bit(SCMD_STATE_COMPLETE, &cmd->state)) {
+				spin_lock_irqsave(&hwq->cq_lock, flags);
+				set_host_byte(cmd, DID_REQUEUE);
+				ufshcd_release_scsi_cmd(hba, lrbp);
+				scsi_done(cmd);
+				spin_unlock_irqrestore(&hwq->cq_lock, flags);
+			}
+		} else {
+			ufshcd_mcq_poll_cqe_lock(hba, hwq);
+		}
+	}
+}
+
 /**
  * ufshcd_transfer_req_compl - handle SCSI and query command completion
  * @hba: per adapter instance
@@ -6049,9 +6142,13 @@ out:
 }
 
 /* Complete requests that have door-bell cleared */
-static void ufshcd_complete_requests(struct ufs_hba *hba)
+static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl)
 {
-	ufshcd_transfer_req_compl(hba);
+	if (is_mcq_enabled(hba))
+		ufshcd_mcq_compl_pending_transfer(hba, force_compl);
+	else
+		ufshcd_transfer_req_compl(hba);
+
 	ufshcd_tmc_handler(hba);
 }
 
@@ -6292,18 +6389,36 @@ static bool ufshcd_abort_all(struct ufs_hba *hba)
 	bool needs_reset = false;
 	int tag, ret;
 
-	/* Clear pending transfer requests */
-	for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
-		ret = ufshcd_try_to_abort_task(hba, tag);
-		dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
-			hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
-			ret ? "failed" : "succeeded");
-		if (ret) {
-			needs_reset = true;
-			goto out;
-		}
-	}
+	if (is_mcq_enabled(hba)) {
+		struct ufshcd_lrb *lrbp;
+		int tag;
+
+		for (tag = 0; tag < hba->nutrs; tag++) {
+			lrbp = &hba->lrb[tag];
+			if (!ufshcd_cmd_inflight(lrbp->cmd))
+				continue;
+			ret = ufshcd_try_to_abort_task(hba, tag);
+			dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
+				hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
+				ret ? "failed" : "succeeded");
+			if (ret) {
+				needs_reset = true;
+				goto out;
+			}
+		}
+	} else {
+		/* Clear pending transfer requests */
+		for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
+			ret = ufshcd_try_to_abort_task(hba, tag);
+			dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
+				hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
+				ret ? "failed" : "succeeded");
+			if (ret) {
+				needs_reset = true;
+				goto out;
+			}
+		}
+	}
 
 	/* Clear pending task management requests */
 	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
 		if (ufshcd_clear_tm_cmd(hba, tag)) {
@@ -6314,7 +6429,7 @@ static bool ufshcd_abort_all(struct ufs_hba *hba)
 
 out:
 	/* Complete the requests that are cleared by s/w */
-	ufshcd_complete_requests(hba);
+	ufshcd_complete_requests(hba, false);
 
 	return needs_reset;
 }
@@ -6354,7 +6469,7 @@ static void ufshcd_err_handler(struct work_struct *work)
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	ufshcd_err_handling_prepare(hba);
 	/* Complete requests that have door-bell cleared by h/w */
-	ufshcd_complete_requests(hba);
+	ufshcd_complete_requests(hba, false);
 	spin_lock_irqsave(hba->host->host_lock, flags);
 again:
 	needs_restore = false;
@@ -6725,7 +6840,7 @@ static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
 		ufshcd_mcq_write_cqis(hba, events, i);
 
 		if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
-			ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+			ufshcd_mcq_poll_cqe_lock(hba, hwq);
 	}
 
 	return IRQ_HANDLED;
@@ -7235,7 +7350,9 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
 	unsigned long flags, pending_reqs = 0, not_cleared = 0;
 	struct Scsi_Host *host;
 	struct ufs_hba *hba;
-	u32 pos;
+	struct ufs_hw_queue *hwq;
+	struct ufshcd_lrb *lrbp;
+	u32 pos, not_cleared_mask = 0;
 	int err;
 	u8 resp = 0xF, lun;
 
@@ -7250,6 +7367,20 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
 		goto out;
 	}
 
+	if (is_mcq_enabled(hba)) {
+		for (pos = 0; pos < hba->nutrs; pos++) {
+			lrbp = &hba->lrb[pos];
+			if (ufshcd_cmd_inflight(lrbp->cmd) &&
+			    lrbp->lun == lun) {
+				ufshcd_clear_cmd(hba, pos);
+				hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
+				ufshcd_mcq_poll_cqe_lock(hba, hwq);
+			}
+		}
+		err = 0;
+		goto out;
+	}
+
 	/* clear the commands that were pending for corresponding LUN */
 	spin_lock_irqsave(&hba->outstanding_lock, flags);
 	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
@@ -7258,17 +7389,20 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
 	hba->outstanding_reqs &= ~pending_reqs;
 	spin_unlock_irqrestore(&hba->outstanding_lock, flags);
 
-	if (ufshcd_clear_cmds(hba, pending_reqs) < 0) {
-		spin_lock_irqsave(&hba->outstanding_lock, flags);
-		not_cleared = pending_reqs &
-			ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
-		hba->outstanding_reqs |= not_cleared;
-		spin_unlock_irqrestore(&hba->outstanding_lock, flags);
-
-		dev_err(hba->dev, "%s: failed to clear requests %#lx\n",
-			__func__, not_cleared);
-	}
-	__ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared);
+	for_each_set_bit(pos, &pending_reqs, hba->nutrs) {
+		if (ufshcd_clear_cmd(hba, pos) < 0) {
+			spin_lock_irqsave(&hba->outstanding_lock, flags);
+			not_cleared = 1U << pos &
+				ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+			hba->outstanding_reqs |= not_cleared;
+			not_cleared_mask |= not_cleared;
+			spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
+			dev_err(hba->dev, "%s: failed to clear request %d\n",
+				__func__, pos);
+		}
+	}
+	__ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared_mask);
 
 out:
 	hba->req_abort_count = 0;
@@ -7306,7 +7440,7 @@ static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
  *
  * Returns zero on success, non-zero on failure
  */
-static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
+int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
 {
 	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
 	int err = 0;
@@ -7329,6 +7463,20 @@ static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
 			 */
 			dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
 				__func__, tag);
+			if (is_mcq_enabled(hba)) {
+				/* MCQ mode */
+				if (ufshcd_cmd_inflight(lrbp->cmd)) {
+					/* sleep for max. 200us same delay as in SDB mode */
+					usleep_range(100, 200);
+					continue;
+				}
+				/* command completed already */
+				dev_err(hba->dev, "%s: cmd at tag=%d is cleared.\n",
+					__func__, tag);
+				goto out;
+			}
+
+			/* Single Doorbell Mode */
 			reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 			if (reg & (1 << tag)) {
 				/* sleep for max. 200us to stabilize */
@@ -7365,7 +7513,7 @@ static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
 		goto out;
 	}
 
-	err = ufshcd_clear_cmds(hba, 1U << tag);
+	err = ufshcd_clear_cmd(hba, tag);
 	if (err)
 		dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
 			__func__, tag, err);
@@ -7394,13 +7542,16 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 	WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
 
 	ufshcd_hold(hba);
-	reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
-	/* If command is already aborted/completed, return FAILED. */
-	if (!(test_bit(tag, &hba->outstanding_reqs))) {
-		dev_err(hba->dev,
-			"%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
-			__func__, tag, hba->outstanding_reqs, reg);
-		goto release;
+
+	if (!is_mcq_enabled(hba)) {
+		reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+		if (!test_bit(tag, &hba->outstanding_reqs)) {
+			/* If command is already aborted/completed, return FAILED. */
+			dev_err(hba->dev,
+				"%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
+				__func__, tag, hba->outstanding_reqs, reg);
+			goto release;
+		}
 	}
 
 	/* Print Transfer Request of aborted task */
@@ -7425,7 +7576,8 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 	}
 	hba->req_abort_count++;
 
-	if (!(reg & (1 << tag))) {
+	if (!is_mcq_enabled(hba) && !(reg & (1 << tag))) {
+		/* only execute this code in single doorbell mode */
 		dev_err(hba->dev,
 			"%s: cmd was completed, but without a notifying intr, tag = %d",
 			__func__, tag);
@@ -7451,6 +7603,12 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 		goto release;
 	}
 
+	if (is_mcq_enabled(hba)) {
+		/* MCQ mode. Branch off to handle abort for mcq mode */
+		err = ufshcd_mcq_abort(cmd);
+		goto release;
+	}
+
 	/* Skip task abort in case previous aborts failed and report failure */
 	if (lrbp->req_abort_skip) {
 		dev_err(hba->dev, "%s: skipping abort\n", __func__);
@@ -7506,7 +7664,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
 	ufshpb_toggle_state(hba, HPB_PRESENT, HPB_RESET);
 	ufshcd_hba_stop(hba);
 	hba->silence_err_logs = true;
-	ufshcd_complete_requests(hba);
+	ufshcd_complete_requests(hba, true);
 	hba->silence_err_logs = false;
 
 	/* scale up clocks to max frequency before full reinitialization */
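One pattern worth calling out in the new ufshcd.c code: recovering a command's hardware queue from its block-layer tag. blk_mq_unique_tag() packs the hctx number into the upper 16 bits of the tag, and the UFS core offsets the result by UFSHCD_MCQ_IO_QUEUE_OFFSET (1, per the ufshcd-priv.h hunk above) because the very first hardware queue is reserved for device commands:

```c
/* As used by ufshcd_mcq_compl_pending_transfer() above. */
u32 utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));	/* (hwq << 16) | tag */
u32 hwq_num = blk_mq_unique_tag_to_hwq(utag);		/* upper 16 bits */
struct ufs_hw_queue *hwq = &hba->uhq[hwq_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
```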
drivers/ufs/host/ufs-qcom.c

@@ -1556,7 +1556,7 @@ static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *__hba)
 	struct ufs_hw_queue *hwq = &hba->uhq[id];
 
 	ufshcd_mcq_write_cqis(hba, 0x1, id);
-	ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+	ufshcd_mcq_poll_cqe_lock(hba, hwq);
 
 	return IRQ_HANDLED;
 }
include/ufs/ufshcd.h

@@ -1087,6 +1087,7 @@ struct ufs_hba {
  * @cq_tail_slot: current slot to which CQ tail pointer is pointing
  * @cq_head_slot: current slot to which CQ head pointer is pointing
  * @cq_lock: Synchronize between multiple polling instances
+ * @sq_mutex: prevent submission queue concurrent access
  */
 struct ufs_hw_queue {
 	void __iomem *mcq_sq_head;
@@ -1105,6 +1106,8 @@ struct ufs_hw_queue {
 	u32 cq_tail_slot;
 	u32 cq_head_slot;
 	spinlock_t cq_lock;
+	/* prevent concurrent access to submission queue */
+	struct mutex sq_mutex;
 };
 
 static inline bool is_mcq_enabled(struct ufs_hba *hba)
@@ -1240,7 +1243,7 @@ void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
 void ufshcd_hba_stop(struct ufs_hba *hba);
 void ufshcd_schedule_eh_work(struct ufs_hba *hba);
 void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
-unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
 					 struct ufs_hw_queue *hwq);
 void ufshcd_mcq_enable_esi(struct ufs_hba *hba);
 void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg);
include/ufs/ufshci.h

@@ -99,6 +99,9 @@ enum {
 enum {
 	REG_SQHP = 0x0,
 	REG_SQTP = 0x4,
+	REG_SQRTC = 0x8,
+	REG_SQCTI = 0xC,
+	REG_SQRTS = 0x10,
 };
 
 enum {
@@ -111,12 +114,26 @@ enum {
 	REG_CQIE = 0x4,
 };
 
+enum {
+	SQ_START = 0x0,
+	SQ_STOP = 0x1,
+	SQ_ICU = 0x2,
+};
+
+enum {
+	SQ_STS = 0x1,
+	SQ_CUS = 0x2,
+};
+
+#define SQ_ICU_ERR_CODE_MASK	GENMASK(7, 4)
+#define UPIU_COMMAND_TYPE_MASK	GENMASK(31, 28)
 #define UFS_MASK(mask, offset)	((mask) << (offset))
 
 /* UFS Version 08h */
 #define MINOR_VERSION_NUM_MASK	UFS_MASK(0xFFFF, 0)
 #define MAJOR_VERSION_NUM_MASK	UFS_MASK(0xFFFF, 16)
 
+#define UFSHCD_NUM_RESERVED	1
 /*
  * Controller UFSHCI version
  * - 2.x and newer use the following scheme:
@@ -503,8 +520,7 @@ struct request_desc_header {
 /**
  * struct utp_transfer_req_desc - UTP Transfer Request Descriptor (UTRD)
  * @header: UTRD header DW-0 to DW-3
- * @command_desc_base_addr_lo: UCD base address low DW-4
- * @command_desc_base_addr_hi: UCD base address high DW-5
+ * @command_desc_base_addr: UCD base address DW 4-5
  * @response_upiu_length: response UPIU length DW-6
  * @response_upiu_offset: response UPIU offset DW-6
  * @prd_table_length: Physical region descriptor length DW-7
@@ -516,8 +532,7 @@ struct utp_transfer_req_desc {
 	struct request_desc_header header;
 
 	/* DW 4-5*/
-	__le32 command_desc_base_addr_lo;
-	__le32 command_desc_base_addr_hi;
+	__le64 command_desc_base_addr;
 
 	/* DW 6 */
 	__le16 response_upiu_length;
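The lo/hi to __le64 merge in the two hunks above is byte-for-byte safe because the descriptor is little-endian: DW-4 (the low word) sits at the lower address, so one cpu_to_le64() store lays down the same bytes as the two cpu_to_le32() stores it replaces. A small userspace check of that equivalence (plain C stand-ins for the kernel helpers):

```c
#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint64_t addr = 0x1122334455667780ULL;	/* sample UCD base address */
	uint32_t lo = (uint32_t)addr, hi = (uint32_t)(addr >> 32);
	uint8_t two_halves[8], one_word[8];

	/* old layout: LE low word in DW-4, LE high word in DW-5 */
	for (int i = 0; i < 4; i++) {
		two_halves[i] = lo >> (8 * i);
		two_halves[4 + i] = hi >> (8 * i);
	}
	/* new layout: one LE 64-bit store */
	for (int i = 0; i < 8; i++)
		one_word[i] = addr >> (8 * i);

	assert(memcmp(two_halves, one_word, sizeof(one_word)) == 0);
	return 0;
}
```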