mt76: mt7615: rework IRQ handling to prepare for MSI support

With MSI interrupts, IRQs must not be enabled from within the IRQ handler,
because that can lead to lost events.
Defer IRQ processing to a tasklet, which is also responsible for enabling
IRQs (to avoid race conditions against the handler).

Co-developed-by: Soul Huang <Soul.Huang@mediatek.com>
Acked-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Soul Huang <soul.huang@mediatek.com>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Author: Felix Fietkau <nbd@nbd.name>
Date:   2020-04-09 14:37:50 +02:00
parent 89829c9e65
commit 9b90ab32f8

4 changed files with 29 additions and 14 deletions
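
The shape of the change: the hard IRQ handler now only masks all device
interrupts and schedules a tasklet; the tasklet acks and dispatches events
and is the only place where interrupts get re-enabled. A minimal sketch of
that pattern, using hypothetical names (my_dev, MY_INT_SOURCE, MY_INT_MASK)
rather than mt76 code:

	#include <linux/interrupt.h>
	#include <linux/io.h>

	#define MY_INT_SOURCE	0x0200	/* hypothetical "pending events" register */
	#define MY_INT_MASK	0x0204	/* hypothetical "enabled events" register */

	struct my_dev {
		void __iomem		*regs;
		u32			irqmask;	/* cached enabled-events mask */
		struct tasklet_struct	irq_tasklet;
	};

	static irqreturn_t my_irq_handler(int irq, void *data)
	{
		struct my_dev *dev = data;

		/* Mask everything; with MSI, re-enabling from here could let a
		 * new message fire before pending events are processed and
		 * lose them. */
		writel(0, dev->regs + MY_INT_MASK);

		/* Defer all processing; the tasklet re-enables interrupts. */
		tasklet_schedule(&dev->irq_tasklet);

		return IRQ_HANDLED;
	}

	static void my_irq_tasklet(unsigned long data)
	{
		struct my_dev *dev = (struct my_dev *)data;
		u32 intr = readl(dev->regs + MY_INT_SOURCE);

		/* Ack the events we are about to handle. */
		writel(intr, dev->regs + MY_INT_SOURCE);

		/* ... hand events to NAPI / workers here ... */

		/* Re-enable interrupts from the tasklet, never from the
		 * hard handler. */
		writel(dev->irqmask, dev->regs + MY_INT_MASK);
	}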

drivers/net/wireless/mediatek/mt76/mmio.c

@@ -73,7 +73,8 @@ void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr,
 	spin_lock_irqsave(&dev->mmio.irq_lock, flags);
 	dev->mmio.irqmask &= ~clear;
 	dev->mmio.irqmask |= set;
-	mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
+	if (addr)
+		mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
 	spin_unlock_irqrestore(&dev->mmio.irq_lock, flags);
 }
 EXPORT_SYMBOL_GPL(mt76_set_irq_mask);
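
After this change a caller can pass addr == 0 to update only the cached
dev->mmio.irqmask without touching the hardware register. Both styles show
up later in this patch: the tasklet uses the write-through form, while
mt7615_irq_enable() uses the cache-only form and lets the tasklet perform
the actual register write.

	/* Cache-only: record the bits; the tasklet writes them out later. */
	mt76_set_irq_mask(&dev->mt76, 0, 0, mask);

	/* Write-through: update the cache and MT_INT_MASK_CSR immediately. */
	mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);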

drivers/net/wireless/mediatek/mt76/mt7615/init.c

@@ -561,5 +561,7 @@ void mt7615_unregister_device(struct mt7615_dev *dev)
 	spin_unlock_bh(&dev->token_lock);
 	idr_destroy(&dev->token);
 
+	tasklet_disable(&dev->irq_tasklet);
+
 	mt76_free_device(&dev->mt76);
 }
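
tasklet_disable() waits for a running tasklet to finish and keeps its
callback from running again, so placing it before mt76_free_device()
guarantees the IRQ tasklet cannot touch freed device state:

	/* Teardown order matters: quiesce the tasklet, then free. */
	tasklet_disable(&dev->irq_tasklet);
	mt76_free_device(&dev->mt76);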

drivers/net/wireless/mediatek/mt76/mt7615/mmio.c

@@ -80,30 +80,42 @@ mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
 static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
 {
 	struct mt7615_dev *dev = dev_instance;
-	u32 intr;
 
-	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
-	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+	mt76_wr(dev, MT_INT_MASK_CSR, 0);
 
 	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
 		return IRQ_NONE;
 
-	trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
+	tasklet_schedule(&dev->irq_tasklet);
+
+	return IRQ_HANDLED;
+}
+
+static void mt7615_irq_tasklet(unsigned long data)
+{
+	struct mt7615_dev *dev = (struct mt7615_dev *)data;
+	u32 intr, mask = 0;
+
+	mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
+	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+	trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
 
 	intr &= dev->mt76.mmio.irqmask;
 
 	if (intr & MT_INT_TX_DONE_ALL) {
-		mt7615_irq_disable(dev, MT_INT_TX_DONE_ALL);
+		mask |= MT_INT_TX_DONE_ALL;
 		napi_schedule(&dev->mt76.tx_napi);
 	}
 
 	if (intr & MT_INT_RX_DONE(0)) {
-		mt7615_irq_disable(dev, MT_INT_RX_DONE(0));
+		mask |= MT_INT_RX_DONE(0);
 		napi_schedule(&dev->mt76.napi[0]);
 	}
 
 	if (intr & MT_INT_RX_DONE(1)) {
-		mt7615_irq_disable(dev, MT_INT_RX_DONE(1));
+		mask |= MT_INT_RX_DONE(1);
 		napi_schedule(&dev->mt76.napi[1]);
 	}
@@ -117,7 +129,7 @@ static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
 		}
 	}
 
-	return IRQ_HANDLED;
+	mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
 }
 
 int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
@@ -154,6 +166,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
 	dev = container_of(mdev, struct mt7615_dev, mt76);
 	mt76_mmio_init(&dev->mt76, mem_base);
+	tasklet_init(&dev->irq_tasklet, mt7615_irq_tasklet, (unsigned long)dev);
 
 	dev->reg_map = map;
 	dev->ops = ops;
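
tasklet_init() with an unsigned long cookie was the standard API when this
patch was written. On kernels >= 5.9 the same setup would use the newer
typed tasklet API instead; a sketch, not part of this patch:

	static void mt7615_irq_tasklet(struct tasklet_struct *t)
	{
		struct mt7615_dev *dev = from_tasklet(dev, t, irq_tasklet);
		/* ... same body as above ... */
	}

	/* in mt7615_mmio_probe(): */
	tasklet_setup(&dev->irq_tasklet, mt7615_irq_tasklet);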

drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h

@@ -229,6 +229,8 @@ struct mt7615_dev {
 		struct mt76_phy mphy;
 	};
 
+	struct tasklet_struct irq_tasklet;
+
 	struct mt7615_phy phy;
 	u32 vif_mask;
 	u32 omac_mask;
@@ -404,12 +406,9 @@ static inline bool is_mt7663(struct mt76_dev *dev)
 
 static inline void mt7615_irq_enable(struct mt7615_dev *dev, u32 mask)
 {
-	mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
-}
-
-static inline void mt7615_irq_disable(struct mt7615_dev *dev, u32 mask)
-{
-	mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
+	mt76_set_irq_mask(&dev->mt76, 0, 0, mask);
+
+	tasklet_schedule(&dev->irq_tasklet);
 }
 
 static inline bool mt7615_firmware_offload(struct mt7615_dev *dev)
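
Taken together, MT_INT_MASK_CSR is now written only from the hard handler
(masking everything) and from the tasklet; mt7615_irq_enable() merely
updates the cached mask (addr == 0) and schedules the tasklet to perform
the hardware write. The resulting flow, summarized from the hunks above:

	/*
	 * hard IRQ:  MT_INT_MASK_CSR <- 0, tasklet_schedule()
	 * tasklet:   read + ack MT_INT_SOURCE_CSR, collect handled queues
	 *            in 'mask', napi_schedule(), then
	 *            mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0)
	 *            clears those bits and re-enables the rest in hardware
	 * NAPI poll: on completion, mt7615_irq_enable() restores the bits
	 *            in the cached mask and schedules the tasklet to write
	 *            it out
	 */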