chelsio: cxgb: Replace the workqueue with threaded interrupt
The external interrupt (F_PL_INTR_EXT) needs to be handled in process context, which is currently accomplished with a workqueue.

A threaded interrupt can provide the process context instead of the workqueue. The threaded handler can later be reused for other interrupt-related processing that requires a non-atomic context, without adding yet another workqueue. free_irq() also guarantees that the thread has finished, which is currently missing (the worker could still be running after the module has been removed).

Save the pending flags in pending_thread_intr. Disable F_PL_INTR_EXT as an interrupt source using the same mechanism that was used before the worker was scheduled, and enable it again once t1_elmer0_ext_intr_handler() is done.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 462e99a18b
commit fec7fa0a75
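For context before the diff: request_threaded_irq() registers a hard handler that runs in interrupt context plus a thread function that runs in process context; the hard handler returns IRQ_WAKE_THREAD to have the core wake the thread. A minimal sketch of that pattern follows, using hypothetical demo_* names and helpers (nothing here is taken from this driver):

#include <linux/interrupt.h>

/* Hypothetical device and helpers, for illustration only. */
struct demo_dev;
bool demo_irq_pending(struct demo_dev *dev);
void demo_mask_slow_sources(struct demo_dev *dev);
void demo_unmask_slow_sources(struct demo_dev *dev);
void demo_handle_slow_work(struct demo_dev *dev);

/* Hard handler: interrupt context, must not sleep.  It only acknowledges
 * the hardware and decides whether the thread needs to run. */
static irqreturn_t demo_hardirq(int irq, void *data)
{
        struct demo_dev *dev = data;

        if (!demo_irq_pending(dev))
                return IRQ_NONE;

        demo_mask_slow_sources(dev);    /* keep the source quiet meanwhile */
        return IRQ_WAKE_THREAD;         /* core wakes demo_thread_fn() */
}

/* Thread function: process context, may sleep (e.g. for slow MMIO/MDIO work). */
static irqreturn_t demo_thread_fn(int irq, void *data)
{
        struct demo_dev *dev = data;

        demo_handle_slow_work(dev);
        demo_unmask_slow_sources(dev);  /* re-enable what the hard handler masked */
        return IRQ_HANDLED;
}

static int demo_setup_irq(struct demo_dev *dev, int irq)
{
        return request_threaded_irq(irq, demo_hardirq, demo_thread_fn,
                                    IRQF_SHARED, "demo", dev);
}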
drivers/net/ethernet/chelsio/cxgb/common.h

@@ -238,7 +238,6 @@ struct adapter {
 	int msg_enable;
 	u32 mmio_len;
 
-	struct work_struct ext_intr_handler_task;
 	struct adapter_params params;
 
 	/* Terminator modules. */
@@ -257,6 +256,7 @@ struct adapter {
 
 	/* guards async operations */
 	spinlock_t async_lock ____cacheline_aligned;
+	u32 pending_thread_intr;
 	u32 slow_intr_mask;
 	int t1powersave;
 };
@@ -334,8 +334,7 @@ void t1_interrupts_enable(adapter_t *adapter);
 void t1_interrupts_disable(adapter_t *adapter);
 void t1_interrupts_clear(adapter_t *adapter);
 int t1_elmer0_ext_intr_handler(adapter_t *adapter);
-void t1_elmer0_ext_intr(adapter_t *adapter);
-int t1_slow_intr_handler(adapter_t *adapter);
+irqreturn_t t1_slow_intr_handler(adapter_t *adapter);
 
 int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
 const struct board_info *t1_get_board_info(unsigned int board_id);
drivers/net/ethernet/chelsio/cxgb/cxgb2.c

@@ -211,9 +211,10 @@ static int cxgb_up(struct adapter *adapter)
 	t1_interrupts_clear(adapter);
 
 	adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
-	err = request_irq(adapter->pdev->irq, t1_interrupt,
-			  adapter->params.has_msi ? 0 : IRQF_SHARED,
-			  adapter->name, adapter);
+	err = request_threaded_irq(adapter->pdev->irq, t1_interrupt,
+				   t1_interrupt_thread,
+				   adapter->params.has_msi ? 0 : IRQF_SHARED,
+				   adapter->name, adapter);
 	if (err) {
 		if (adapter->params.has_msi)
			pci_disable_msi(adapter->pdev);
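The commit message's point about free_irq() comes from the core API rather than from this hunk: free_irq() does not return while the threaded handler is still running, so the driver's teardown path (unchanged by this patch and therefore not shown in the diff) gets the missing synchronization for free. A hedged sketch of such a teardown, again with the hypothetical demo_* names from the earlier sketch:

#include <linux/interrupt.h>

struct demo_dev;        /* hypothetical device, as above */

static void demo_teardown_irq(struct demo_dev *dev, int irq)
{
        /* Releases the line and waits for a running demo_thread_fn()
         * to finish -- the guarantee the old workqueue path lacked. */
        free_irq(irq, dev);
}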
@@ -916,41 +917,6 @@ static void mac_stats_task(struct work_struct *work)
 	spin_unlock(&adapter->work_lock);
 }
 
-/*
- * Processes elmer0 external interrupts in process context.
- */
-static void ext_intr_task(struct work_struct *work)
-{
-	struct adapter *adapter =
-		container_of(work, struct adapter, ext_intr_handler_task);
-
-	t1_elmer0_ext_intr_handler(adapter);
-
-	/* Now reenable external interrupts */
-	spin_lock_irq(&adapter->async_lock);
-	adapter->slow_intr_mask |= F_PL_INTR_EXT;
-	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
-	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
-	       adapter->regs + A_PL_ENABLE);
-	spin_unlock_irq(&adapter->async_lock);
-}
-
-/*
- * Interrupt-context handler for elmer0 external interrupts.
- */
-void t1_elmer0_ext_intr(struct adapter *adapter)
-{
-	/*
-	 * Schedule a task to handle external interrupts as we require
-	 * a process context.  We disable EXT interrupts in the interim
-	 * and let the task reenable them when it's done.
-	 */
-	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
-	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
-	       adapter->regs + A_PL_ENABLE);
-	schedule_work(&adapter->ext_intr_handler_task);
-}
-
 void t1_fatal_err(struct adapter *adapter)
 {
 	if (adapter->flags & FULL_INIT_DONE) {
@@ -1062,8 +1028,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	spin_lock_init(&adapter->async_lock);
 	spin_lock_init(&adapter->mac_lock);
 
-	INIT_WORK(&adapter->ext_intr_handler_task,
-		  ext_intr_task);
 	INIT_DELAYED_WORK(&adapter->stats_update_task,
 			  mac_stats_task);
 
drivers/net/ethernet/chelsio/cxgb/sge.c

@@ -1619,11 +1619,38 @@ int t1_poll(struct napi_struct *napi, int budget)
 	return work_done;
 }
 
+irqreturn_t t1_interrupt_thread(int irq, void *data)
+{
+	struct adapter *adapter = data;
+	u32 pending_thread_intr;
+
+	spin_lock_irq(&adapter->async_lock);
+	pending_thread_intr = adapter->pending_thread_intr;
+	adapter->pending_thread_intr = 0;
+	spin_unlock_irq(&adapter->async_lock);
+
+	if (!pending_thread_intr)
+		return IRQ_NONE;
+
+	if (pending_thread_intr & F_PL_INTR_EXT)
+		t1_elmer0_ext_intr_handler(adapter);
+
+	spin_lock_irq(&adapter->async_lock);
+	adapter->slow_intr_mask |= F_PL_INTR_EXT;
+
+	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
+	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+	       adapter->regs + A_PL_ENABLE);
+	spin_unlock_irq(&adapter->async_lock);
+
+	return IRQ_HANDLED;
+}
+
 irqreturn_t t1_interrupt(int irq, void *data)
 {
 	struct adapter *adapter = data;
 	struct sge *sge = adapter->sge;
-	int handled;
+	irqreturn_t handled;
 
 	if (likely(responses_pending(adapter))) {
 		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
@@ -1645,10 +1672,10 @@ irqreturn_t t1_interrupt(int irq, void *data)
 	handled = t1_slow_intr_handler(adapter);
 	spin_unlock(&adapter->async_lock);
 
-	if (!handled)
+	if (handled == IRQ_NONE)
 		sge->stats.unhandled_irqs++;
 
-	return IRQ_RETVAL(handled != 0);
+	return handled;
 }
 
 /*
drivers/net/ethernet/chelsio/cxgb/sge.h

@@ -74,6 +74,7 @@ struct sge *t1_sge_create(struct adapter *, struct sge_params *);
 int t1_sge_configure(struct sge *, struct sge_params *);
 int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
 void t1_sge_destroy(struct sge *);
+irqreturn_t t1_interrupt_thread(int irq, void *data);
 irqreturn_t t1_interrupt(int irq, void *cookie);
 int t1_poll(struct napi_struct *, int);
 
drivers/net/ethernet/chelsio/cxgb/subr.c

@@ -210,7 +210,7 @@ static int fpga_phy_intr_handler(adapter_t *adapter)
 /*
  * Slow path interrupt handler for FPGAs.
  */
-static int fpga_slow_intr(adapter_t *adapter)
+static irqreturn_t fpga_slow_intr(adapter_t *adapter)
 {
 	u32 cause = readl(adapter->regs + A_PL_CAUSE);
 
@@ -238,7 +238,7 @@ static int fpga_slow_intr(adapter_t *adapter)
 	if (cause)
 		writel(cause, adapter->regs + A_PL_CAUSE);
 
-	return cause != 0;
+	return cause == 0 ? IRQ_NONE : IRQ_HANDLED;
 }
 #endif
 
@@ -842,13 +842,14 @@ void t1_interrupts_clear(adapter_t* adapter)
 /*
  * Slow path interrupt handler for ASICs.
  */
-static int asic_slow_intr(adapter_t *adapter)
+static irqreturn_t asic_slow_intr(adapter_t *adapter)
 {
 	u32 cause = readl(adapter->regs + A_PL_CAUSE);
+	irqreturn_t ret = IRQ_HANDLED;
 
 	cause &= adapter->slow_intr_mask;
 	if (!cause)
-		return 0;
+		return IRQ_NONE;
 	if (cause & F_PL_INTR_SGE_ERR)
 		t1_sge_intr_error_handler(adapter->sge);
 	if (cause & F_PL_INTR_TP)
@@ -857,16 +858,25 @@ static int asic_slow_intr(adapter_t *adapter)
 		t1_espi_intr_handler(adapter->espi);
 	if (cause & F_PL_INTR_PCIX)
 		t1_pci_intr_handler(adapter);
-	if (cause & F_PL_INTR_EXT)
-		t1_elmer0_ext_intr(adapter);
+	if (cause & F_PL_INTR_EXT) {
+		/* Wake the threaded interrupt to handle external interrupts as
+		 * we require a process context. We disable EXT interrupts in
+		 * the interim and let the thread reenable them when it's done.
+		 */
+		adapter->pending_thread_intr |= F_PL_INTR_EXT;
+		adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
+		writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+		       adapter->regs + A_PL_ENABLE);
+		ret = IRQ_WAKE_THREAD;
+	}
 
 	/* Clear the interrupts just processed. */
 	writel(cause, adapter->regs + A_PL_CAUSE);
 	readl(adapter->regs + A_PL_CAUSE); /* flush writes */
-	return 1;
+	return ret;
 }
 
-int t1_slow_intr_handler(adapter_t *adapter)
+irqreturn_t t1_slow_intr_handler(adapter_t *adapter)
 {
 #ifdef CONFIG_CHELSIO_T1_1G
 	if (!t1_is_asic(adapter))
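The asic_slow_intr() change above is the producer half of a small handoff protocol: the hard handler records the deferred source in pending_thread_intr and masks it, while the thread (t1_interrupt_thread() in the sge.c hunk) snapshots and clears the flags, does the sleeping work, and unmasks again. A generic sketch of that producer/consumer shape, with hypothetical names, assuming a spinlock shared by both sides (cxgb takes async_lock around t1_slow_intr_handler() in much the same way):

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define DEMO_SRC_EXT	BIT(0)	/* hypothetical deferred interrupt source */

struct demo_dev {
        spinlock_t lock;        /* guards pending below */
        u32 pending;            /* sources handed off to the thread */
};

/* Producer: called from the hard handler with dev->lock already held. */
static irqreturn_t demo_defer_source(struct demo_dev *dev, u32 src)
{
        dev->pending |= src;    /* remember it for the thread */
        /* ...mask src in the hardware here... */
        return IRQ_WAKE_THREAD;
}

/* Consumer: the thread snapshots and clears the flags under the lock,
 * then does the sleeping work outside of it. */
static irqreturn_t demo_thread(int irq, void *data)
{
        struct demo_dev *dev = data;
        u32 pending;

        spin_lock_irq(&dev->lock);
        pending = dev->pending;
        dev->pending = 0;
        spin_unlock_irq(&dev->lock);

        if (!pending)
                return IRQ_NONE;        /* spurious wakeup on a shared line */

        if (pending & DEMO_SRC_EXT) {
                /* ...sleeping work, then unmask DEMO_SRC_EXT again... */
        }

        return IRQ_HANDLED;
}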