mmc: core: Fix recursive locking issue in CQE recovery path

Consider the following stack trace

-001|raw_spin_lock_irqsave
-002|mmc_blk_cqe_complete_rq
-003|__blk_mq_complete_request(inline)
-003|blk_mq_complete_request(rq)
-004|mmc_cqe_timed_out(inline)
-004|mmc_mq_timed_out

mmc_mq_timed_out() acquires the queue_lock for the first
time. The mmc_blk_cqe_complete_rq() function then tries to acquire
the same queue_lock, resulting in recursive locking: the task
spins on a lock it has already acquired, which eventually leads
to a watchdog bark.

Fix this issue by holding the lock only for the required critical section.

Cc: <stable@vger.kernel.org>
Fixes: 1e8e55b670 ("mmc: block: Add CQE support")
Suggested-by: Sahitya Tummala <stummala@codeaurora.org>
Signed-off-by: Sarthak Garg <sartgarg@codeaurora.org>
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
Link: https://lore.kernel.org/r/1588868135-31783-1-git-send-email-vbadigan@codeaurora.org
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
This commit is contained in:
Sarthak Garg 2020-05-07 21:45:33 +05:30 committed by Ulf Hansson
parent e6bfb1bf00
commit 39a22f7374
1 changed file with 4 additions and 9 deletions

View File

@@ -107,7 +107,7 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
 	case MMC_ISSUE_DCMD:
 		if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
 			if (recovery_needed)
-				__mmc_cqe_recovery_notifier(mq);
+				mmc_cqe_recovery_notifier(mrq);
 			return BLK_EH_RESET_TIMER;
 		}
 		/* No timeout (XXX: huh? comment doesn't make much sense) */
@@ -127,18 +127,13 @@ static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
 	struct mmc_card *card = mq->card;
 	struct mmc_host *host = card->host;
 	unsigned long flags;
-	int ret;
+	bool ignore_tout;
 
 	spin_lock_irqsave(&mq->lock, flags);
-
-	if (mq->recovery_needed || !mq->use_cqe || host->hsq_enabled)
-		ret = BLK_EH_RESET_TIMER;
-	else
-		ret = mmc_cqe_timed_out(req);
-
+	ignore_tout = mq->recovery_needed || !mq->use_cqe || host->hsq_enabled;
 	spin_unlock_irqrestore(&mq->lock, flags);
 
-	return ret;
+	return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
 }
 
 static void mmc_mq_recovery_handler(struct work_struct *work)