mlx4_en: don't use napi_synchronize inside mlx4_en_netpoll
The mlx4 driver is triggering schedules while atomic inside
mlx4_en_netpoll:

	spin_lock_irqsave(&cq->lock, flags);
	napi_synchronize(&cq->napi);
	^^^^^      msleep here
	mlx4_en_process_rx_cq(dev, cq, 0);
	spin_unlock_irqrestore(&cq->lock, flags);

This was part of a patch by Alexander Guller from Mellanox in 2011,
but it still isn't upstream.

Signed-off-by: Chris Mason <clm@fb.com>
cc: stable@vger.kernel.org
Acked-By: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit c98235cb85
parent b07afe07b1
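A note on why this is a bug: napi_synchronize() waits for any in-flight
NAPI poll by sleeping, so it must never run under a spinlock or with
interrupts off. A minimal sketch of its SMP implementation, abridged from
include/linux/netdevice.h of this era (illustrative context, not part of
the patch itself):

	/* Sleep in 1ms steps until the NAPI instance is no longer scheduled.
	 * msleep() calls schedule(), which is exactly the "scheduling while
	 * atomic" splat when invoked under spin_lock_irqsave(). */
	static inline void napi_synchronize(const struct napi_struct *n)
	{
		while (test_bit(NAPI_STATE_SCHED, &n->state))
			msleep(1);
	}

The patch therefore drops cq->lock entirely and replaces the
synchronize-then-poll sequence with napi_schedule(), which only sets the
scheduled bit and raises the RX softirq, both safe in atomic context.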
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -66,7 +66,6 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
 
 	cq->ring = ring;
 	cq->is_tx = mode;
-	spin_lock_init(&cq->lock);
 
 	/* Allocate HW buffers on provided NUMA node.
 	 * dev->numa_node is used in mtt range allocation flow.
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1304,15 +1304,11 @@ static void mlx4_en_netpoll(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_cq *cq;
-	unsigned long flags;
 	int i;
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		cq = priv->rx_cq[i];
-		spin_lock_irqsave(&cq->lock, flags);
-		napi_synchronize(&cq->napi);
-		mlx4_en_process_rx_cq(dev, cq, 0);
-		spin_unlock_irqrestore(&cq->lock, flags);
+		napi_schedule(&cq->napi);
 	}
 }
 #endif
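For context (this hook assignment is unchanged by the patch),
mlx4_en_netpoll() is the driver's netpoll controller, which netconsole and
friends invoke with interrupts disabled. Abridged from the mlx4_netdev_ops
definition in the same en_netdev.c:

	static const struct net_device_ops mlx4_netdev_ops = {
		/* ... other ndo callbacks ... */
	#ifdef CONFIG_NET_POLL_CONTROLLER
		.ndo_poll_controller	= mlx4_en_netpoll,
	#endif
	};

Because the caller holds interrupts off, anything reached from here must be
non-sleeping, which napi_schedule() satisfies.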
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -319,7 +319,6 @@ struct mlx4_en_cq {
 	struct mlx4_cq mcq;
 	struct mlx4_hwq_resources wqres;
 	int ring;
-	spinlock_t lock;
 	struct net_device *dev;
 	struct napi_struct napi;
 	int size;