ath6kl: Fix random system lockup

The commit "ath6kl: Use a mutex_lock to avoid
race in diabling and handling irq" introduces a
state where ath6kl_sdio_irq_handler() would be waiting
to claim the sdio function for receive indefinitely
when things happen in the following order.

ath6kl_sdio_irq_handler()
	- acquires mtx_irq
	- sdio_release_host()
					ath6kl_sdio_irq_disable()
						- sdio_claim_host()
						- sleeps on mtx_irq
	ath6kl_hif_intr_bh_handler()
		- waits (indefinitely) for the sdio
		  function to be released so it can be
		  exclusively claimed again for the
		  receive operation.

Fix this by replacing the mtx_irq with an atomic
variable and a wait_queue.
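
In outline, the handshake this patch introduces looks like the
following minimal kernel-style sketch. It is a condensed illustration,
not the driver code: irq_handler_path()/irq_disable_path() are
illustrative stand-ins for ath6kl_sdio_irq_handler()/
ath6kl_sdio_irq_disable(), and the sdio host claim/release and
bottom-half calls are only indicated in comments.

#include <linux/atomic.h>
#include <linux/wait.h>

static atomic_t irq_handling = ATOMIC_INIT(0);	/* set while irq is handled */
static DECLARE_WAIT_QUEUE_HEAD(irq_wq);		/* woken when handling ends */

static void irq_handler_path(void)
{
	atomic_set(&irq_handling, 1);
	/* ... sdio_release_host(), ath6kl_hif_intr_bh_handler(),
	 * sdio_claim_host() ... */
	atomic_set(&irq_handling, 0);
	wake_up(&irq_wq);	/* let a pending irq disable proceed */
}

static void irq_disable_path(void)
{
	if (atomic_read(&irq_handling)) {
		/* Unlike the mutex version, drop the host before
		 * sleeping so the irq handler can re-claim it and
		 * finish instead of blocking forever.
		 */
		/* ... sdio_release_host() ... */
		if (wait_event_interruptible(irq_wq,
					     !atomic_read(&irq_handling)))
			return;	/* interrupted by a signal */
		/* ... sdio_claim_host() ... */
	}
	/* ... sdio_release_irq(), then sdio_release_host() ... */
}

The disabling side now sleeps without holding the sdio host, which is
what breaks the circular wait described above.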

kvalo: add ath6kl_sdio_is_on_irq() due to open parenthesis alignment

Signed-off-by: Raja Mani <rmani@qca.qualcomm.com>
Signed-off-by: Vasanthakumar Thiagarajan <vthiagar@qca.qualcomm.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
commit d1f4159723 (parent af840ba7e2)
Author: Raja Mani, 2012-02-09 12:57:12 +05:30; committed by Kalle Valo
1 changed file with 26 additions and 8 deletions

--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -50,8 +50,8 @@ struct ath6kl_sdio {
 	/* scatter request list head */
 	struct list_head scat_req;
 
-	/* Avoids disabling irq while the interrupts being handled */
-	struct mutex mtx_irq;
+	atomic_t irq_handling;
+	wait_queue_head_t irq_wq;
 
 	spinlock_t scat_lock;
 	bool scatter_enabled;
@@ -463,7 +463,7 @@ static void ath6kl_sdio_irq_handler(struct sdio_func *func)
 	ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n");
 
 	ar_sdio = sdio_get_drvdata(func);
-	mutex_lock(&ar_sdio->mtx_irq);
+	atomic_set(&ar_sdio->irq_handling, 1);
 	/*
 	 * Release the host during interrups so we can pick it back up when
 	 * we process commands.
@@ -472,7 +472,10 @@ static void ath6kl_sdio_irq_handler(struct sdio_func *func)
 
 	status = ath6kl_hif_intr_bh_handler(ar_sdio->ar);
 	sdio_claim_host(ar_sdio->func);
-	mutex_unlock(&ar_sdio->mtx_irq);
+
+	atomic_set(&ar_sdio->irq_handling, 0);
+	wake_up(&ar_sdio->irq_wq);
+
 	WARN_ON(status && status != -ECANCELED);
 }
 
@@ -573,6 +576,13 @@ static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
 	sdio_release_host(ar_sdio->func);
 }
 
+static bool ath6kl_sdio_is_on_irq(struct ath6kl *ar)
+{
+	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+
+	return !atomic_read(&ar_sdio->irq_handling);
+}
+
 static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
 {
 	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
@@ -580,14 +590,21 @@ static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
 
 	sdio_claim_host(ar_sdio->func);
 
-	mutex_lock(&ar_sdio->mtx_irq);
+	if (atomic_read(&ar_sdio->irq_handling)) {
+		sdio_release_host(ar_sdio->func);
+
+		ret = wait_event_interruptible(ar_sdio->irq_wq,
+					       ath6kl_sdio_is_on_irq(ar));
+		if (ret)
+			return;
+
+		sdio_claim_host(ar_sdio->func);
+	}
 
 	ret = sdio_release_irq(ar_sdio->func);
 	if (ret)
 		ath6kl_err("Failed to release sdio irq: %d\n", ret);
 
-	mutex_unlock(&ar_sdio->mtx_irq);
-
 	sdio_release_host(ar_sdio->func);
 }
 
@@ -1288,7 +1305,6 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
 	spin_lock_init(&ar_sdio->scat_lock);
 	spin_lock_init(&ar_sdio->wr_async_lock);
 	mutex_init(&ar_sdio->dma_buffer_mutex);
-	mutex_init(&ar_sdio->mtx_irq);
 
 	INIT_LIST_HEAD(&ar_sdio->scat_req);
 	INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
@@ -1296,6 +1312,8 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
 
 	INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);
 
+	init_waitqueue_head(&ar_sdio->irq_wq);
+
 	for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
 		ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);