mt76: fix concurrent rx calls on A-MPDU release
Add a spinlock in mt76_rx_complete. Without this, multiple stats updates
could happen in parallel, which can lead to deadlocks. There are probably
more corner cases fixed by this change.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
commit c3d7c82a8b
parent 97389373d5
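As background for the race described in the commit message: mt76_rx_complete() can be entered both from the normal rx/NAPI poll path and from the A-MPDU reorder release path (which is what the subject refers to), so the frame dequeue loop and the shared per-device state it updates need to be serialized. The sketch below is a minimal, standalone userspace illustration of that pattern, not driver code: a pthread mutex stands in for dev->rx_lock, a plain counter (rx_frames) stands in for the shared rx stats, and the two threads stand in for the two concurrent callers.

/*
 * Standalone illustration only (compile with: cc -pthread sketch.c).
 * Not mt76 code: the mutex plays the role of dev->rx_lock, the counter
 * plays the role of shared rx state, and the two threads model the NAPI
 * poll path and the A-MPDU release path calling the same completion
 * routine concurrently.
 */
#include <pthread.h>
#include <stdio.h>

#define FRAMES_PER_CALL 100000

static pthread_mutex_t rx_lock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for dev->rx_lock */
static unsigned long rx_frames;                             /* stand-in for shared rx stats */

static void rx_complete(void)
{
        /* Serialize the whole dequeue/update loop, as spin_lock(&dev->rx_lock) does. */
        pthread_mutex_lock(&rx_lock);
        for (int i = 0; i < FRAMES_PER_CALL; i++)
                rx_frames++;            /* shared state touched per delivered frame */
        pthread_mutex_unlock(&rx_lock);
}

static void *rx_caller(void *arg)
{
        (void)arg;
        rx_complete();
        return NULL;
}

int main(void)
{
        pthread_t napi_path, ampdu_release_path;

        pthread_create(&napi_path, NULL, rx_caller, NULL);
        pthread_create(&ampdu_release_path, NULL, rx_caller, NULL);
        pthread_join(napi_path, NULL);
        pthread_join(ampdu_release_path, NULL);

        /* With the lock this always prints 200000; without it, updates can be lost. */
        printf("rx_frames = %lu\n", rx_frames);
        return 0;
}

A mutex is used here only because this is userspace; the driver needs a spinlock since its rx path runs in atomic context. The actual change is the diff below.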
@@ -561,6 +561,7 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
 	if (queue >= 0)
 		napi = &dev->napi[queue];
 
+	spin_lock(&dev->rx_lock);
 	while ((skb = __skb_dequeue(frames)) != NULL) {
 		if (mt76_check_ccmp_pn(skb)) {
 			dev_kfree_skb(skb);
@@ -570,6 +571,7 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
 		sta = mt76_rx_convert(skb);
 		ieee80211_rx_napi(dev->hw, sta, skb, napi);
 	}
+	spin_unlock(&dev->rx_lock);
 }
 
 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q)
@@ -241,6 +241,7 @@ struct mt76_dev {
 	struct device *dev;
 
 	struct net_device napi_dev;
+	spinlock_t rx_lock;
 	struct napi_struct napi[__MT_RXQ_MAX];
 	struct sk_buff_head rx_skb[__MT_RXQ_MAX];
 
@@ -645,6 +645,7 @@ struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev)
 	dev->mt76.drv = &drv_ops;
 	mutex_init(&dev->mutex);
 	spin_lock_init(&dev->irq_lock);
+	spin_lock_init(&dev->mt76.rx_lock);
 
 	return dev;
 }