net: factorize sync-rcu call in unregister_netdevice_many
Add dev_close_many and dev_deactivate_many to factorize another
sync-rcu operation on the netdevice unregister path.

    $ modprobe dummy numdummies=10000
    $ ip link set dev dummy* up
    $ time rmmod dummy

    Without the patch           With the patch

    real    0m 24.63s           real    0m 5.15s
    user    0m  0.00s           user    0m 0.00s
    sys     0m  6.05s           sys     0m 5.14s

Signed-off-by: Octavian Purdila <opurdila@ixiacom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 443457242b
parent c6c8fea297
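The win comes from paying the RCU grace-period cost once per batch instead of once per device. The sketch below is purely illustrative (the structure and function names are hypothetical, not taken from this patch); it only shows why hoisting synchronize_rcu() out of the per-device loop collapses N grace periods into one:

#include <linux/list.h>
#include <linux/rcupdate.h>

struct my_dev_entry {			/* hypothetical per-device bookkeeping */
	struct list_head list;
};

static void quiesce_one(struct my_dev_entry *e)
{
	/* unpublish this device's RCU-protected state ... */
}

/* Before: one grace period per device -> N synchronize_rcu() calls. */
static void teardown_each(struct list_head *head)
{
	struct my_dev_entry *e;

	list_for_each_entry(e, head, list) {
		quiesce_one(e);
		synchronize_rcu();	/* wait, then finish tearing down this one device */
	}
}

/* After: unpublish everything first, then wait once for the whole batch. */
static void teardown_many(struct list_head *head)
{
	struct my_dev_entry *e;

	list_for_each_entry(e, head, list)
		quiesce_one(e);

	synchronize_rcu();		/* a single grace period covers every device on the list */
}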
@@ -321,6 +321,7 @@ extern void dev_init_scheduler(struct net_device *dev);
 extern void dev_shutdown(struct net_device *dev);
 extern void dev_activate(struct net_device *dev);
 extern void dev_deactivate(struct net_device *dev);
+extern void dev_deactivate_many(struct list_head *head);
 extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
				     struct Qdisc *qdisc);
 extern void qdisc_reset(struct Qdisc *qdisc);
net/core/dev.c (124 changed lines)
@@ -1222,52 +1222,90 @@ int dev_open(struct net_device *dev)
 }
 EXPORT_SYMBOL(dev_open);
 
-static int __dev_close(struct net_device *dev)
+static int __dev_close_many(struct list_head *head)
 {
-	const struct net_device_ops *ops = dev->netdev_ops;
+	struct net_device *dev;
 
 	ASSERT_RTNL();
 	might_sleep();
 
-	/*
-	 * Tell people we are going down, so that they can
-	 * prepare to death, when device is still operating.
-	 */
-	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
+	list_for_each_entry(dev, head, unreg_list) {
+		/*
+		 * Tell people we are going down, so that they can
+		 * prepare to death, when device is still operating.
+		 */
+		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
 
-	clear_bit(__LINK_STATE_START, &dev->state);
+		clear_bit(__LINK_STATE_START, &dev->state);
 
-	/* Synchronize to scheduled poll. We cannot touch poll list,
-	 * it can be even on different cpu. So just clear netif_running().
-	 *
-	 * dev->stop() will invoke napi_disable() on all of it's
-	 * napi_struct instances on this device.
-	 */
-	smp_mb__after_clear_bit(); /* Commit netif_running(). */
+		/* Synchronize to scheduled poll. We cannot touch poll list, it
+		 * can be even on different cpu. So just clear netif_running().
+		 *
+		 * dev->stop() will invoke napi_disable() on all of it's
+		 * napi_struct instances on this device.
+		 */
+		smp_mb__after_clear_bit(); /* Commit netif_running(). */
+	}
 
-	dev_deactivate(dev);
+	dev_deactivate_many(head);
 
-	/*
-	 * Call the device specific close. This cannot fail.
-	 * Only if device is UP
-	 *
-	 * We allow it to be called even after a DETACH hot-plug
-	 * event.
-	 */
-	if (ops->ndo_stop)
-		ops->ndo_stop(dev);
+	list_for_each_entry(dev, head, unreg_list) {
+		const struct net_device_ops *ops = dev->netdev_ops;
 
-	/*
-	 * Device is now down.
-	 */
-	dev->flags &= ~IFF_UP;
+		/*
+		 * Call the device specific close. This cannot fail.
+		 * Only if device is UP
+		 *
+		 * We allow it to be called even after a DETACH hot-plug
+		 * event.
+		 */
+		if (ops->ndo_stop)
+			ops->ndo_stop(dev);
 
-	/*
-	 * Shutdown NET_DMA
-	 */
-	net_dmaengine_put();
+		/*
+		 * Device is now down.
+		 */
+		dev->flags &= ~IFF_UP;
+
+		/*
+		 * Shutdown NET_DMA
+		 */
+		net_dmaengine_put();
+	}
+
+	return 0;
+}
+
+static int __dev_close(struct net_device *dev)
+{
+	LIST_HEAD(single);
+
+	list_add(&dev->unreg_list, &single);
+	return __dev_close_many(&single);
+}
+
+int dev_close_many(struct list_head *head)
+{
+	struct net_device *dev, *tmp;
+	LIST_HEAD(tmp_list);
+
+	list_for_each_entry_safe(dev, tmp, head, unreg_list)
+		if (!(dev->flags & IFF_UP))
+			list_move(&dev->unreg_list, &tmp_list);
+
+	__dev_close_many(head);
 
+	/*
+	 * Tell people we are down
+	 */
+	list_for_each_entry(dev, head, unreg_list) {
+		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
+		call_netdevice_notifiers(NETDEV_DOWN, dev);
+	}
+
+	/* rollback_registered_many needs the complete original list */
+	list_splice(&tmp_list, head);
 	return 0;
 }
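For reference, a hedged sketch of how a caller reaches this batched path: unregister_netdevice_queue() and unregister_netdevice_many() are the existing batching entry points in net/core/dev.c, and with this patch the per-list close/deactivate cost inside rollback_registered_many() is paid once per list rather than once per device. The helper name below is made up for illustration:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Hypothetical helper: tear down a whole group of devices in one batch. */
static void destroy_my_devices(struct net_device *devs[], int count)
{
	LIST_HEAD(kill_list);
	int i;

	rtnl_lock();
	for (i = 0; i < count; i++)
		unregister_netdevice_queue(devs[i], &kill_list);

	/* One pass over the list: dev_close_many(), dev_deactivate_many()
	 * and the remaining unregister steps, instead of count separate calls. */
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}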
@@ -1282,16 +1320,10 @@ static int __dev_close(struct net_device *dev)
  */
 int dev_close(struct net_device *dev)
 {
-	if (!(dev->flags & IFF_UP))
-		return 0;
+	LIST_HEAD(single);
 
-	__dev_close(dev);
-
-	/*
-	 * Tell people we are down
-	 */
-	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
-	call_netdevice_notifiers(NETDEV_DOWN, dev);
+	list_add(&dev->unreg_list, &single);
+	dev_close_many(&single);
 
 	return 0;
 }
@@ -4963,10 +4995,12 @@ static void rollback_registered_many(struct list_head *head)
 		}
 
 		BUG_ON(dev->reg_state != NETREG_REGISTERED);
+	}
 
-		/* If device is running, close it first. */
-		dev_close(dev);
+	/* If device is running, close it first. */
+	dev_close_many(head);
 
+	list_for_each_entry(dev, head, unreg_list) {
 		/* And unlink it from device chain. */
 		unlist_netdevice(dev);
 
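One detail worth noting: rollback_registered_many() (above) hands dev_close_many() the complete unregister list and, as the patch's own comment says, needs the complete original list back. That is why dev_close_many() parks devices that are already !IFF_UP on a temporary list and splices them back afterwards. A minimal, self-contained sketch of that list idiom (types and names are illustrative, not from the patch):

#include <linux/list.h>

struct work_item {			/* illustrative element type */
	struct list_head node;
	bool ready;
};

static void process_ready(struct list_head *head)
{
	/* act only on the entries still on @head ... */
}

static void process_all(struct list_head *head)
{
	struct work_item *it, *tmp;
	LIST_HEAD(skipped);

	/* Park entries that must skip the operation on a side list. */
	list_for_each_entry_safe(it, tmp, head, node)
		if (!it->ready)
			list_move(&it->node, &skipped);

	process_ready(head);

	/* Splice them back so the caller still sees the complete list. */
	list_splice(&skipped, head);
}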
net/sched/sch_generic.c

@@ -810,20 +810,35 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 	return false;
 }
 
-void dev_deactivate(struct net_device *dev)
+void dev_deactivate_many(struct list_head *head)
 {
-	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
-	if (dev_ingress_queue(dev))
-		dev_deactivate_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
+	struct net_device *dev;
 
-	dev_watchdog_down(dev);
+	list_for_each_entry(dev, head, unreg_list) {
+		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
+					 &noop_qdisc);
+		if (dev_ingress_queue(dev))
+			dev_deactivate_queue(dev, dev_ingress_queue(dev),
+					     &noop_qdisc);
+
+		dev_watchdog_down(dev);
+	}
 
 	/* Wait for outstanding qdisc-less dev_queue_xmit calls. */
 	synchronize_rcu();
 
 	/* Wait for outstanding qdisc_run calls. */
-	while (some_qdisc_is_busy(dev))
-		yield();
+	list_for_each_entry(dev, head, unreg_list)
+		while (some_qdisc_is_busy(dev))
+			yield();
+}
+
+void dev_deactivate(struct net_device *dev)
+{
+	LIST_HEAD(single);
+
+	list_add(&dev->unreg_list, &single);
+	dev_deactivate_many(&single);
 }
 
 static void dev_init_scheduler_queue(struct net_device *dev,