ipip: get rid of ipip_lock
As RTNL is held while doing tunnel inserts and deletes, we can remove the ipip_lock spinlock. My initial RCU conversion was conservative and converted the rwlock to a spinlock, with no RTNL requirement. Use appropriate RCU annotations and modern lockdep checks as well.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit b7285b7912
parent 7dff59efbb
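The pattern the patch converts to is the usual RTNL-writer / RCU-reader scheme: the hash-chain pointers are annotated __rcu, writers run under RTNL and use rtnl_dereference()/rcu_assign_pointer(), and readers walk the chains under rcu_read_lock() with rcu_dereference(). The following is a minimal sketch of that pattern, not taken from the patch; the struct item / head names are illustrative only, and the reader is assumed to be called inside rcu_read_lock().

#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

struct item {
	struct item __rcu *next;
	int key;
};

static struct item __rcu *head;

/* Writer side: RTNL held, so rtnl_dereference() is a lockdep-checked,
 * barrier-free way to read the __rcu pointer before republishing it.
 */
static void item_link(struct item *it)
{
	ASSERT_RTNL();
	rcu_assign_pointer(it->next, rtnl_dereference(head));
	rcu_assign_pointer(head, it);
}

/* Reader side: caller holds rcu_read_lock(); every hop through an
 * __rcu pointer goes through rcu_dereference().
 */
static struct item *item_lookup(int key)
{
	struct item *it;

	for (it = rcu_dereference(head); it; it = rcu_dereference(it->next))
		if (it->key == key)
			return it;
	return NULL;
}

rtnl_dereference() documents, and with lockdep verifies, that the writer relies on RTNL rather than rcu_read_lock(), which is what lets the dedicated ipip_lock go away.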
@@ -122,11 +122,11 @@
 static int ipip_net_id __read_mostly;

 struct ipip_net {
-	struct ip_tunnel *tunnels_r_l[HASH_SIZE];
-	struct ip_tunnel *tunnels_r[HASH_SIZE];
-	struct ip_tunnel *tunnels_l[HASH_SIZE];
-	struct ip_tunnel *tunnels_wc[1];
-	struct ip_tunnel **tunnels[4];
+	struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
+	struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
+	struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
+	struct ip_tunnel __rcu *tunnels_wc[1];
+	struct ip_tunnel __rcu **tunnels[4];

 	struct net_device *fb_tunnel_dev;
 };
@@ -135,9 +135,8 @@ static void ipip_tunnel_init(struct net_device *dev);
 static void ipip_tunnel_setup(struct net_device *dev);

 /*
- * Locking : hash tables are protected by RCU and a spinlock
+ * Locking : hash tables are protected by RCU and RTNL
  */
-static DEFINE_SPINLOCK(ipip_lock);

 #define for_each_ip_tunnel_rcu(start) \
 	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
@@ -145,8 +144,8 @@ static DEFINE_SPINLOCK(ipip_lock);
 static struct ip_tunnel * ipip_tunnel_lookup(struct net *net,
 		__be32 remote, __be32 local)
 {
-	unsigned h0 = HASH(remote);
-	unsigned h1 = HASH(local);
+	unsigned int h0 = HASH(remote);
+	unsigned int h1 = HASH(local);
 	struct ip_tunnel *t;
 	struct ipip_net *ipn = net_generic(net, ipip_net_id);

@@ -169,12 +168,12 @@ static struct ip_tunnel * ipip_tunnel_lookup(struct net *net,
 	return NULL;
 }

-static struct ip_tunnel **__ipip_bucket(struct ipip_net *ipn,
+static struct ip_tunnel __rcu **__ipip_bucket(struct ipip_net *ipn,
 			struct ip_tunnel_parm *parms)
 {
 	__be32 remote = parms->iph.daddr;
 	__be32 local = parms->iph.saddr;
-	unsigned h = 0;
+	unsigned int h = 0;
 	int prio = 0;

 	if (remote) {
@@ -188,7 +187,7 @@ static struct ip_tunnel **__ipip_bucket(struct ipip_net *ipn,
 	return &ipn->tunnels[prio][h];
 }

-static inline struct ip_tunnel **ipip_bucket(struct ipip_net *ipn,
+static inline struct ip_tunnel __rcu **ipip_bucket(struct ipip_net *ipn,
 			struct ip_tunnel *t)
 {
 	return __ipip_bucket(ipn, &t->parms);
@@ -196,13 +195,14 @@ static inline struct ip_tunnel **ipip_bucket(struct ipip_net *ipn,

 static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
 {
-	struct ip_tunnel **tp;
+	struct ip_tunnel __rcu **tp;
+	struct ip_tunnel *iter;

-	for (tp = ipip_bucket(ipn, t); *tp; tp = &(*tp)->next) {
-		if (t == *tp) {
-			spin_lock_bh(&ipip_lock);
-			*tp = t->next;
-			spin_unlock_bh(&ipip_lock);
+	for (tp = ipip_bucket(ipn, t);
+	     (iter = rtnl_dereference(*tp)) != NULL;
+	     tp = &iter->next) {
+		if (t == iter) {
+			rcu_assign_pointer(*tp, t->next);
 			break;
 		}
 	}
@@ -210,12 +210,10 @@ static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)

 static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t)
 {
-	struct ip_tunnel **tp = ipip_bucket(ipn, t);
+	struct ip_tunnel __rcu **tp = ipip_bucket(ipn, t);

-	spin_lock_bh(&ipip_lock);
-	t->next = *tp;
+	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
 	rcu_assign_pointer(*tp, t);
-	spin_unlock_bh(&ipip_lock);
 }

 static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
@@ -223,12 +221,15 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
 {
 	__be32 remote = parms->iph.daddr;
 	__be32 local = parms->iph.saddr;
-	struct ip_tunnel *t, **tp, *nt;
+	struct ip_tunnel *t, *nt;
+	struct ip_tunnel __rcu **tp;
 	struct net_device *dev;
 	char name[IFNAMSIZ];
 	struct ipip_net *ipn = net_generic(net, ipip_net_id);

-	for (tp = __ipip_bucket(ipn, parms); (t = *tp) != NULL; tp = &t->next) {
+	for (tp = __ipip_bucket(ipn, parms);
+	     (t = rtnl_dereference(*tp)) != NULL;
+	     tp = &t->next) {
 		if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
 			return t;
 	}
@@ -268,16 +269,15 @@ failed_free:
 	return NULL;
 }

+/* called with RTNL */
 static void ipip_tunnel_uninit(struct net_device *dev)
 {
 	struct net *net = dev_net(dev);
 	struct ipip_net *ipn = net_generic(net, ipip_net_id);

-	if (dev == ipn->fb_tunnel_dev) {
-		spin_lock_bh(&ipip_lock);
-		ipn->tunnels_wc[0] = NULL;
-		spin_unlock_bh(&ipip_lock);
-	} else
+	if (dev == ipn->fb_tunnel_dev)
+		rcu_assign_pointer(ipn->tunnels_wc[0], NULL);
+	else
 		ipip_tunnel_unlink(ipn, netdev_priv(dev));
 	dev_put(dev);
 }
@@ -741,7 +741,7 @@ static void __net_init ipip_fb_tunnel_init(struct net_device *dev)
 	iph->ihl = 5;

 	dev_hold(dev);
-	ipn->tunnels_wc[0] = tunnel;
+	rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
 }

 static struct xfrm_tunnel ipip_handler __read_mostly = {
@@ -760,11 +760,12 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
 	for (prio = 1; prio < 4; prio++) {
 		int h;
 		for (h = 0; h < HASH_SIZE; h++) {
-			struct ip_tunnel *t = ipn->tunnels[prio][h];
+			struct ip_tunnel *t;

+			t = rtnl_dereference(ipn->tunnels[prio][h]);
 			while (t != NULL) {
 				unregister_netdevice_queue(t->dev, head);
-				t = t->next;
+				t = rtnl_dereference(t->next);
 			}
 		}
 	}