netpoll: Don't drop all received packets.

Change the strategy of netpoll from dropping all packets received
during netpoll_poll_dev to calling the napi poll routine with a budget
of 0 (to avoid processing the driver's rx queue), and ignoring any
packets received with netif_rx (those are safely placed on the backlog
queue).
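For illustration only (not part of this commit), a napi poll routine
that honors a budget of 0 would look roughly like the sketch below;
every ex_* name is hypothetical:

#include <linux/netdevice.h>

struct ex_adapter {			/* made-up private state */
	struct napi_struct napi;
	/* rings, registers, ... */
};

static void ex_clean_tx_ring(struct ex_adapter *ad);		/* hypothetical */
static int  ex_clean_rx_ring(struct ex_adapter *ad, int budget);/* hypothetical */

/*
 * Sketch of the budget == 0 contract netpoll now relies on: tx
 * completion work may run, the rx ring must stay untouched, and the
 * return value may never exceed the budget.
 */
static int ex_napi_poll(struct napi_struct *napi, int budget)
{
	struct ex_adapter *ad = container_of(napi, struct ex_adapter, napi);
	int work_done = 0;

	ex_clean_tx_ring(ad);			/* frees tx skbs; always safe */

	if (budget)				/* budget == 0: skip rx entirely */
		work_done = ex_clean_rx_ring(ad, budget);

	if (work_done < budget)			/* false when budget == 0, so */
		napi_complete(napi);		/* napi stays scheduled */

	return work_done;
}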

All of the netpoll-supporting drivers have been reviewed to ensure
that they either use netif_rx or have a napi poll routine that
supports a budget of 0 and will not process the driver's rx queue
when given one.
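The netif_rx case is safe because netif_rx only queues the skb on the
per-cpu backlog for later NET_RX_SOFTIRQ processing rather than
delivering it in place. A hypothetical non-NAPI receive handler (the
ex_* names are made up) shows the shape of that path:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static struct sk_buff *ex_rx_dequeue(struct net_device *dev);	/* hypothetical */

static irqreturn_t ex_rx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct sk_buff *skb;

	while ((skb = ex_rx_dequeue(dev)) != NULL) {
		skb->protocol = eth_type_trans(skb, dev);
		/* Queues on the backlog; nothing is dropped even when
		 * this runs while netpoll_poll_dev holds the device.
		 */
		netif_rx(skb);
	}
	return IRQ_HANDLED;
}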

Not dropping packets makes NETPOLL_RX_DROP unnecessary, so it is removed.

npinfo->rx_flags is removed as well: with only the NETPOLL_RX_ENABLED
flag left, rx_flags had become a redundant mirror of
list_empty(&npinfo->rx_np).
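For reference, the helper this change leans on (added earlier in this
series; reproduced here from memory, so treat it as a sketch) is just
the list test that rx_flags would otherwise have duplicated:

static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
{
	return !list_empty(&npinfo->rx_np);
}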

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit b6bacd550c (parent ff60763143)
Author: Eric W. Biederman, 2014-03-14 20:48:28 -07:00
Committed by: David S. Miller
2 files changed, 7 insertions(+), 13 deletions(-)

diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -39,7 +39,6 @@ struct netpoll {
 struct netpoll_info {
 	atomic_t refcnt;
 
-	unsigned long rx_flags;
 	spinlock_t rx_lock;
 	struct semaphore dev_lock;
 	struct list_head rx_np; /* netpolls that registered an rx_skb_hook */
@@ -99,7 +98,7 @@ static inline bool netpoll_rx_on(struct sk_buff *skb)
 {
 	struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
 
-	return npinfo && (netpoll_rx_processing(npinfo) || npinfo->rx_flags);
+	return npinfo && netpoll_rx_processing(npinfo);
 }
 
 static inline bool netpoll_rx(struct sk_buff *skb)

diff --git a/net/core/netpoll.c b/net/core/netpoll.c
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -51,8 +51,6 @@ static atomic_t trapped;
 DEFINE_STATIC_SRCU(netpoll_srcu);
 
 #define USEC_PER_POLL	50
-#define NETPOLL_RX_ENABLED  1
-#define NETPOLL_RX_DROP     2
 
 #define MAX_SKB_SIZE					\
 	(sizeof(struct ethhdr) +			\
@@ -193,7 +191,8 @@ static void netpoll_poll_dev(struct net_device *dev)
 {
 	const struct net_device_ops *ops;
 	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
-	int budget = 16;
+	bool rx_processing = netpoll_rx_processing(ni);
+	int budget = rx_processing ? 16 : 0;
 
 	/* Don't do any rx activity if the dev_lock mutex is held
 	 * the dev_open/close paths use this to block netpoll activity
@@ -207,8 +206,8 @@ static void netpoll_poll_dev(struct net_device *dev)
 		return;
 	}
 
-	ni->rx_flags |= NETPOLL_RX_DROP;
-	atomic_inc(&trapped);
+	if (rx_processing)
+		atomic_inc(&trapped);
 
 	ops = dev->netdev_ops;
 	if (!ops->ndo_poll_controller) {
@@ -221,8 +220,8 @@ static void netpoll_poll_dev(struct net_device *dev)
 
 	poll_napi(dev, budget);
 
-	atomic_dec(&trapped);
-	ni->rx_flags &= ~NETPOLL_RX_DROP;
+	if (rx_processing)
+		atomic_dec(&trapped);
 
 	up(&ni->dev_lock);
@@ -1050,7 +1049,6 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 			goto out;
 		}
 
-		npinfo->rx_flags = 0;
 		INIT_LIST_HEAD(&npinfo->rx_np);
 		spin_lock_init(&npinfo->rx_lock);
@@ -1076,7 +1074,6 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 	if (np->rx_skb_hook) {
 		spin_lock_irqsave(&npinfo->rx_lock, flags);
-		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
 		list_add_tail(&np->rx, &npinfo->rx_np);
 		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 	}
@@ -1258,8 +1255,6 @@ void __netpoll_cleanup(struct netpoll *np)
 	if (!list_empty(&npinfo->rx_np)) {
 		spin_lock_irqsave(&npinfo->rx_lock, flags);
 		list_del(&np->rx);
-		if (list_empty(&npinfo->rx_np))
-			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
 		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 	}