netpoll: drivers must not enable IRQ unconditionally in their NAPI handler
net/core/netpoll.c::netpoll_send_skb() calls the poll handler when
it is available. Since netconsole can be used from almost any context,
IRQs must not be enabled unconditionally in the NAPI handler of a
driver that supports netpoll.
Commit b57bd06655 fixed the issue for the 8139too.c driver.
Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
commit d15e9c4d9a
parent 79f3d3996f
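The fix is the same pattern in every driver below: save the caller's interrupt state with the *_irqsave variants and restore it on the way out, instead of re-enabling IRQs unconditionally via spin_lock_irq()/local_irq_disable(). As a minimal sketch (not part of the patch), a netpoll-safe poll handler in the old pre-2.6.24 NAPI style could look as follows; foo_poll, foo_priv, foo_rx_process and foo_enable_rx_irq are hypothetical names used only to illustrate the locking pattern.

/*
 * Illustrative sketch only -- the foo_* names are hypothetical.
 * netpoll may invoke ->poll() with hardware interrupts already
 * disabled, so the completion path must save and restore the IRQ
 * state rather than enable interrupts blindly.
 */
static int foo_poll(struct net_device *dev, int *budget)
{
	struct foo_priv *fp = netdev_priv(dev);
	int limit = min(*budget, dev->quota);
	unsigned long flags;
	int work_done;

	work_done = foo_rx_process(dev, limit);	/* hypothetical RX loop */
	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done >= limit)
		return 1;	/* quota used up, stay on the poll list */

	/* All packets processed: complete polling and re-enable RX
	 * interrupts under the device lock, preserving the caller's
	 * IRQ state.
	 */
	spin_lock_irqsave(&fp->lock, flags);
	__netif_rx_complete(dev);
	foo_enable_rx_irq(fp);			/* hypothetical register write */
	spin_unlock_irqrestore(&fp->lock, flags);

	return 0;	/* done */
}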
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -617,13 +617,15 @@ rx_next:
 	 * this round of polling
 	 */
 	if (rx_work) {
+		unsigned long flags;
+
 		if (cpr16(IntrStatus) & cp_rx_intr_mask)
 			goto rx_status_loop;
 
-		local_irq_disable();
+		local_irq_save(flags);
 		cpw16_f(IntrMask, cp_intr_mask);
 		__netif_rx_complete(dev);
-		local_irq_enable();
+		local_irq_restore(flags);
 
 		return 0;	/* done */
 	}
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -879,12 +879,14 @@ static int b44_poll(struct net_device *netdev, int *budget)
 	}
 
 	if (bp->istat & ISTAT_ERRORS) {
-		spin_lock_irq(&bp->lock);
+		unsigned long flags;
+
+		spin_lock_irqsave(&bp->lock, flags);
 		b44_halt(bp);
 		b44_init_rings(bp);
 		b44_init_hw(bp, 1);
 		netif_wake_queue(bp->dev);
-		spin_unlock_irq(&bp->lock);
+		spin_unlock_irqrestore(&bp->lock, flags);
 		done = 1;
 	}
 
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -2576,14 +2576,15 @@ static int nv_napi_poll(struct net_device *dev, int *budget)
 	int pkts, limit = min(*budget, dev->quota);
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
+	unsigned long flags;
 
 	pkts = nv_rx_process(dev, limit);
 
 	if (nv_alloc_rx(dev)) {
-		spin_lock_irq(&np->lock);
+		spin_lock_irqsave(&np->lock, flags);
 		if (!np->in_shutdown)
 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-		spin_unlock_irq(&np->lock);
+		spin_unlock_irqrestore(&np->lock, flags);
 	}
 
 	if (pkts < limit) {
@@ -2591,13 +2592,15 @@ static int nv_napi_poll(struct net_device *dev, int *budget)
 		netif_rx_complete(dev);
 
 		/* re-enable receive interrupts */
-		spin_lock_irq(&np->lock);
+		spin_lock_irqsave(&np->lock, flags);
+
 		np->irqmask |= NVREG_IRQ_RX_ALL;
 		if (np->msi_flags & NV_MSI_X_ENABLED)
 			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
 		else
 			writel(np->irqmask, base + NvRegIrqMask);
-		spin_unlock_irq(&np->lock);
+
+		spin_unlock_irqrestore(&np->lock, flags);
 		return 0;
 	} else {
 		/* used up our quantum, so reschedule */
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -2920,6 +2920,7 @@ static int skge_poll(struct net_device *dev, int *budget)
 	struct skge_hw *hw = skge->hw;
 	struct skge_ring *ring = &skge->rx_ring;
 	struct skge_element *e;
+	unsigned long flags;
 	int to_do = min(dev->quota, *budget);
 	int work_done = 0;
 
@@ -2957,12 +2958,12 @@ static int skge_poll(struct net_device *dev, int *budget)
 	if (work_done >= to_do)
 		return 1; /* not done */
 
-	spin_lock_irq(&hw->hw_lock);
+	spin_lock_irqsave(&hw->hw_lock, flags);
 	__netif_rx_complete(dev);
 	hw->intr_mask |= irqmask[skge->port];
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
 	skge_read32(hw, B0_IMSK);
-	spin_unlock_irq(&hw->hw_lock);
+	spin_unlock_irqrestore(&hw->hw_lock, flags);
 
 	return 0;
 }