Merge branch 'netns-opt'

Cong Wang says:

====================
net: some minor optimization for netns id

Cong Wang (2):
  vxlan: call peernet2id() in fdb notification
  netns: avoid disabling irq for netns id
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David S. Miller 2016-09-04 11:39:59 -07:00
commit 6f2a802763
2 changed files with 17 additions and 22 deletions

View File

@@ -287,7 +287,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
 	if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
 	    nla_put_s32(skb, NDA_LINK_NETNSID,
-			peernet2id_alloc(dev_net(vxlan->dev), vxlan->net)))
+			peernet2id(dev_net(vxlan->dev), vxlan->net)))
 		goto nla_put_failure;

 	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))

View File

@@ -215,31 +215,29 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id);
  */
 int peernet2id_alloc(struct net *net, struct net *peer)
 {
-	unsigned long flags;
 	bool alloc;
 	int id;

-	spin_lock_irqsave(&net->nsid_lock, flags);
+	spin_lock_bh(&net->nsid_lock);
 	alloc = atomic_read(&peer->count) == 0 ? false : true;
 	id = __peernet2id_alloc(net, peer, &alloc);
-	spin_unlock_irqrestore(&net->nsid_lock, flags);
+	spin_unlock_bh(&net->nsid_lock);
 	if (alloc && id >= 0)
 		rtnl_net_notifyid(net, RTM_NEWNSID, id);
 	return id;
 }
 EXPORT_SYMBOL(peernet2id_alloc);

 /* This function returns, if assigned, the id of a peer netns. */
 int peernet2id(struct net *net, struct net *peer)
 {
-	unsigned long flags;
 	int id;

-	spin_lock_irqsave(&net->nsid_lock, flags);
+	spin_lock_bh(&net->nsid_lock);
 	id = __peernet2id(net, peer);
-	spin_unlock_irqrestore(&net->nsid_lock, flags);
+	spin_unlock_bh(&net->nsid_lock);
 	return id;
 }
 EXPORT_SYMBOL(peernet2id);
 /* This function returns true is the peer netns has an id assigned into the
  * current netns.
@@ -251,18 +249,17 @@ bool peernet_has_id(struct net *net, struct net *peer)

 struct net *get_net_ns_by_id(struct net *net, int id)
 {
-	unsigned long flags;
 	struct net *peer;

 	if (id < 0)
 		return NULL;

 	rcu_read_lock();
-	spin_lock_irqsave(&net->nsid_lock, flags);
+	spin_lock_bh(&net->nsid_lock);
 	peer = idr_find(&net->netns_ids, id);
 	if (peer)
 		get_net(peer);
-	spin_unlock_irqrestore(&net->nsid_lock, flags);
+	spin_unlock_bh(&net->nsid_lock);
 	rcu_read_unlock();

 	return peer;
@@ -406,17 +403,17 @@ static void cleanup_net(struct work_struct *work)
 		for_each_net(tmp) {
 			int id;

-			spin_lock_irq(&tmp->nsid_lock);
+			spin_lock_bh(&tmp->nsid_lock);
 			id = __peernet2id(tmp, net);
 			if (id >= 0)
 				idr_remove(&tmp->netns_ids, id);
-			spin_unlock_irq(&tmp->nsid_lock);
+			spin_unlock_bh(&tmp->nsid_lock);
 			if (id >= 0)
 				rtnl_net_notifyid(tmp, RTM_DELNSID, id);
 		}
-		spin_lock_irq(&net->nsid_lock);
+		spin_lock_bh(&net->nsid_lock);
 		idr_destroy(&net->netns_ids);
-		spin_unlock_irq(&net->nsid_lock);
+		spin_unlock_bh(&net->nsid_lock);
 	}
 	rtnl_unlock();
@@ -544,7 +541,6 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
 	struct net *net = sock_net(skb->sk);
 	struct nlattr *tb[NETNSA_MAX + 1];
-	unsigned long flags;
 	struct net *peer;
 	int nsid, err;

@@ -565,15 +561,15 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
 	if (IS_ERR(peer))
 		return PTR_ERR(peer);

-	spin_lock_irqsave(&net->nsid_lock, flags);
+	spin_lock_bh(&net->nsid_lock);
 	if (__peernet2id(net, peer) >= 0) {
-		spin_unlock_irqrestore(&net->nsid_lock, flags);
+		spin_unlock_bh(&net->nsid_lock);
 		err = -EEXIST;
 		goto out;
 	}

 	err = alloc_netid(net, peer, nsid);
-	spin_unlock_irqrestore(&net->nsid_lock, flags);
+	spin_unlock_bh(&net->nsid_lock);
 	if (err >= 0) {
 		rtnl_net_notifyid(net, RTM_NEWNSID, err);
 		err = 0;
@@ -695,11 +691,10 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
 		.idx = 0,
 		.s_idx = cb->args[0],
 	};
-	unsigned long flags;

-	spin_lock_irqsave(&net->nsid_lock, flags);
+	spin_lock_bh(&net->nsid_lock);
 	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
-	spin_unlock_irqrestore(&net->nsid_lock, flags);
+	spin_unlock_bh(&net->nsid_lock);
 	cb->args[0] = net_cb.idx;

 	return skb->len;