netpoll: Move rx enable/disable into __dev_close_many

Today netpoll_rx_enable and netpoll_rx_disable are called from
dev_close and __dev_close, but not from dev_close_many.

Move the calls into __dev_close_many so that we have a single call
site to maintain, and so that dev_close_many gains this protection as
well, which importantly makes batched network device deletes safe.
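For illustration only, a rough sketch (not the full function: most of the
teardown in the middle is elided, and the two-pass structure is inferred from
the hunks below) of where the two calls end up in __dev_close_many after this
change:

	static int __dev_close_many(struct list_head *head)
	{
		struct net_device *dev;

		might_sleep();

		list_for_each_entry(dev, head, close_list) {
			/* Temporarily disable netpoll until the interface is down */
			netpoll_rx_disable(dev);

			call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
			clear_bit(__LINK_STATE_START, &dev->state);
			/* ... wait for the device to stop running (elided) ... */
		}

		/* ... per-device stop/deactivate handling elided ... */

		list_for_each_entry(dev, head, close_list) {
			dev->flags &= ~IFF_UP;
			net_dmaengine_put();
			/* Re-enable netpoll rx only after this device is down */
			netpoll_rx_enable(dev);
		}

		return 0;
	}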

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Eric W. Biederman 2014-03-27 15:38:17 -07:00 committed by David S. Miller
parent 944e294857
commit 3f4df2066b
1 changed file with 4 additions and 9 deletions

net/core/dev.c

@@ -1313,6 +1313,9 @@ static int __dev_close_many(struct list_head *head)
 	might_sleep();
 
 	list_for_each_entry(dev, head, close_list) {
+		/* Temporarily disable netpoll until the interface is down */
+		netpoll_rx_disable(dev);
+
 		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
 
 		clear_bit(__LINK_STATE_START, &dev->state);
@@ -1343,6 +1346,7 @@ static int __dev_close_many(struct list_head *head)
 
 		dev->flags &= ~IFF_UP;
 		net_dmaengine_put();
+		netpoll_rx_enable(dev);
 	}
 
 	return 0;
@@ -1353,14 +1357,10 @@ static int __dev_close(struct net_device *dev)
 	int retval;
 	LIST_HEAD(single);
 
-	/* Temporarily disable netpoll until the interface is down */
-	netpoll_rx_disable(dev);
-
 	list_add(&dev->close_list, &single);
 	retval = __dev_close_many(&single);
 	list_del(&single);
 
-	netpoll_rx_enable(dev);
 	return retval;
 }
 
@@ -1398,14 +1398,9 @@ int dev_close(struct net_device *dev)
 	if (dev->flags & IFF_UP) {
 		LIST_HEAD(single);
 
-		/* Block netpoll rx while the interface is going down */
-		netpoll_rx_disable(dev);
-
 		list_add(&dev->close_list, &single);
 		dev_close_many(&single);
 		list_del(&single);
-
-		netpoll_rx_enable(dev);
 	}
 	return 0;
 }
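As a further hedged usage sketch (the helper name and the way the list is
built are illustrative, not part of this patch), a batched teardown path
inside net/core/dev.c can now hand a whole list of devices to dev_close_many()
and rely on __dev_close_many() to handle the netpoll rx window for each
device:

	/* Hypothetical helper, for illustration only. */
	static void example_close_batch(struct net_device **devs, int n)
	{
		LIST_HEAD(close_list);
		int i;

		for (i = 0; i < n; i++)
			list_add_tail(&devs[i]->close_list, &close_list);

		/*
		 * dev_close_many() -> __dev_close_many() now disables and
		 * re-enables netpoll rx per device, so no caller-side
		 * netpoll_rx_disable()/netpoll_rx_enable() pairs are needed.
		 */
		dev_close_many(&close_list);
	}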