Merge branch 'vxlan-geneve-fix-hlist-corruption'
Jiri Benc says:

====================
vxlan, geneve: fix hlist corruption

Fix memory corruption introduced with the support of both IPv4 and IPv6
sockets in a single device. The same bug is present in VXLAN and Geneve.
====================

Signed-off-by: Jiri Benc <jbenc@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 4b821cb200
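For readers who want the failure mode spelled out: before these patches, struct geneve_dev and struct vxlan_dev embedded a single struct hlist_node, and once a device had both an IPv4 and an IPv6 socket that one node was added to two different VNI hash chains. An hlist_node can only live on one list, so the second insert silently rewires the first chain. The sketch below is a minimal, self-contained user-space illustration, not driver code: hlist_add_head is re-implemented here without RCU, and the dev_broken, dev_node and dev_fixed types are invented stand-ins for the real structures, with dev_node playing the role of the geneve_dev_node / vxlan_dev_node introduced by the patches.

#include <stdio.h>

struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

/* Simplified, non-RCU version of the kernel helper, for illustration only. */
static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	n->next = first;
	if (first)
		first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

struct dev_broken {			/* pre-fix layout: one shared node */
	struct hlist_node hlist;
};

struct dev_fixed;
struct dev_node {			/* post-fix layout: per-family node */
	struct hlist_node hlist;
	struct dev_fixed *dev;
};
struct dev_fixed {
	struct dev_node hlist4;
	struct dev_node hlist6;
};

int main(void)
{
	struct hlist_head vni_v4 = { 0 }, vni_v6 = { 0 };
	struct dev_broken a = { { 0 } };
	struct dev_fixed c = { { { 0 } } };
	struct dev_node *n;

	/* Pre-fix behaviour: the device opens an IPv4 and an IPv6 socket and
	 * the same embedded node ends up on both VNI hash chains. */
	hlist_add_head(&a.hlist, &vni_v4);
	hlist_add_head(&a.hlist, &vni_v6);	/* re-links the same node */

	/* The v4 chain still points at 'a', but 'a's back-pointer now refers
	 * to the v6 head: unlinking 'a' would fix up only the v6 chain and
	 * leave the v4 chain referencing a stale entry -- list corruption. */
	printf("v4 head -> a: %d, a->pprev -> v6 head: %d\n",
	       vni_v4.first == &a.hlist, a.hlist.pprev == &vni_v6.first);

	/* Post-fix behaviour: one node per address family, each carrying a
	 * back-pointer to the device (node->geneve / node->vxlan upstream). */
	c.hlist4.dev = &c;
	c.hlist6.dev = &c;
	hlist_add_head(&c.hlist4.hlist, &vni_v4);
	hlist_add_head(&c.hlist6.hlist, &vni_v6);

	n = (struct dev_node *)vni_v4.first;	/* hlist is the first member */
	printf("v4 lookup resolves to device c: %d\n", n->dev == &c);
	return 0;
}

The first line of output shows the cross-linked state (both checks true); the per-family nodes in the second half keep each chain self-consistent while a lookup still reaches the device through the back-pointer, which is exactly what geneve_lookup() and vxlan_vs_find_vni() now do via node->geneve and node->vxlan in the hunks below.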
drivers/net/geneve.c
@@ -45,9 +45,17 @@ struct geneve_net {
 
 static unsigned int geneve_net_id;
 
+struct geneve_dev_node {
+	struct hlist_node hlist;
+	struct geneve_dev *geneve;
+};
+
 /* Pseudo network device */
 struct geneve_dev {
-	struct hlist_node  hlist;	/* vni hash table */
+	struct geneve_dev_node hlist4;	/* vni hash table for IPv4 socket */
+#if IS_ENABLED(CONFIG_IPV6)
+	struct geneve_dev_node hlist6;	/* vni hash table for IPv6 socket */
+#endif
 	struct net	   *net;	/* netns for packet i/o */
 	struct net_device  *dev;	/* netdev for geneve tunnel */
 	struct ip_tunnel_info info;
@@ -123,16 +131,16 @@ static struct geneve_dev *geneve_lookup(struct geneve_sock *gs,
 					__be32 addr, u8 vni[])
 {
 	struct hlist_head *vni_list_head;
-	struct geneve_dev *geneve;
+	struct geneve_dev_node *node;
 	__u32 hash;
 
 	/* Find the device for this VNI */
 	hash = geneve_net_vni_hash(vni);
 	vni_list_head = &gs->vni_list[hash];
-	hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) {
-		if (eq_tun_id_and_vni((u8 *)&geneve->info.key.tun_id, vni) &&
-		    addr == geneve->info.key.u.ipv4.dst)
-			return geneve;
+	hlist_for_each_entry_rcu(node, vni_list_head, hlist) {
+		if (eq_tun_id_and_vni((u8 *)&node->geneve->info.key.tun_id, vni) &&
+		    addr == node->geneve->info.key.u.ipv4.dst)
+			return node->geneve;
 	}
 	return NULL;
 }
@@ -142,16 +150,16 @@ static struct geneve_dev *geneve6_lookup(struct geneve_sock *gs,
 					  struct in6_addr addr6, u8 vni[])
 {
 	struct hlist_head *vni_list_head;
-	struct geneve_dev *geneve;
+	struct geneve_dev_node *node;
 	__u32 hash;
 
 	/* Find the device for this VNI */
 	hash = geneve_net_vni_hash(vni);
 	vni_list_head = &gs->vni_list[hash];
-	hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) {
-		if (eq_tun_id_and_vni((u8 *)&geneve->info.key.tun_id, vni) &&
-		    ipv6_addr_equal(&addr6, &geneve->info.key.u.ipv6.dst))
-			return geneve;
+	hlist_for_each_entry_rcu(node, vni_list_head, hlist) {
+		if (eq_tun_id_and_vni((u8 *)&node->geneve->info.key.tun_id, vni) &&
+		    ipv6_addr_equal(&addr6, &node->geneve->info.key.u.ipv6.dst))
+			return node->geneve;
 	}
 	return NULL;
 }
@@ -591,6 +599,7 @@ static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6)
 {
 	struct net *net = geneve->net;
 	struct geneve_net *gn = net_generic(net, geneve_net_id);
+	struct geneve_dev_node *node;
 	struct geneve_sock *gs;
 	__u8 vni[3];
 	__u32 hash;
@@ -609,15 +618,20 @@ static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6)
 out:
 	gs->collect_md = geneve->collect_md;
 #if IS_ENABLED(CONFIG_IPV6)
-	if (ipv6)
+	if (ipv6) {
 		rcu_assign_pointer(geneve->sock6, gs);
-	else
+		node = &geneve->hlist6;
+	} else
 #endif
+	{
 		rcu_assign_pointer(geneve->sock4, gs);
+		node = &geneve->hlist4;
+	}
+	node->geneve = geneve;
 
 	tunnel_id_to_vni(geneve->info.key.tun_id, vni);
 	hash = geneve_net_vni_hash(vni);
-	hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]);
+	hlist_add_head_rcu(&node->hlist, &gs->vni_list[hash]);
 	return 0;
 }
 
@@ -644,8 +658,10 @@ static int geneve_stop(struct net_device *dev)
 {
 	struct geneve_dev *geneve = netdev_priv(dev);
 
-	if (!hlist_unhashed(&geneve->hlist))
-		hlist_del_rcu(&geneve->hlist);
+	hlist_del_init_rcu(&geneve->hlist4.hlist);
+#if IS_ENABLED(CONFIG_IPV6)
+	hlist_del_init_rcu(&geneve->hlist6.hlist);
+#endif
 	geneve_sock_release(geneve);
 	return 0;
 }
drivers/net/vxlan.c
@@ -229,25 +229,25 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
 static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, int ifindex,
 					   __be32 vni)
 {
-	struct vxlan_dev *vxlan;
+	struct vxlan_dev_node *node;
 
 	/* For flow based devices, map all packets to VNI 0 */
 	if (vs->flags & VXLAN_F_COLLECT_METADATA)
 		vni = 0;
 
-	hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) {
-		if (vxlan->default_dst.remote_vni != vni)
+	hlist_for_each_entry_rcu(node, vni_head(vs, vni), hlist) {
+		if (node->vxlan->default_dst.remote_vni != vni)
 			continue;
 
 		if (IS_ENABLED(CONFIG_IPV6)) {
-			const struct vxlan_config *cfg = &vxlan->cfg;
+			const struct vxlan_config *cfg = &node->vxlan->cfg;
 
 			if ((cfg->flags & VXLAN_F_IPV6_LINKLOCAL) &&
 			    cfg->remote_ifindex != ifindex)
 				continue;
 		}
 
-		return vxlan;
+		return node->vxlan;
 	}
 
 	return NULL;
@@ -2387,17 +2387,22 @@ static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
 	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 
 	spin_lock(&vn->sock_lock);
-	hlist_del_init_rcu(&vxlan->hlist);
+	hlist_del_init_rcu(&vxlan->hlist4.hlist);
+#if IS_ENABLED(CONFIG_IPV6)
+	hlist_del_init_rcu(&vxlan->hlist6.hlist);
+#endif
 	spin_unlock(&vn->sock_lock);
 }
 
-static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
+static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
+			     struct vxlan_dev_node *node)
 {
 	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 	__be32 vni = vxlan->default_dst.remote_vni;
 
+	node->vxlan = vxlan;
 	spin_lock(&vn->sock_lock);
-	hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
+	hlist_add_head_rcu(&node->hlist, vni_head(vs, vni));
 	spin_unlock(&vn->sock_lock);
 }
 
@@ -2849,6 +2854,7 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
 {
 	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 	struct vxlan_sock *vs = NULL;
+	struct vxlan_dev_node *node;
 
 	if (!vxlan->cfg.no_share) {
 		spin_lock(&vn->sock_lock);
@@ -2866,12 +2872,16 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
 	if (IS_ERR(vs))
 		return PTR_ERR(vs);
 #if IS_ENABLED(CONFIG_IPV6)
-	if (ipv6)
+	if (ipv6) {
 		rcu_assign_pointer(vxlan->vn6_sock, vs);
-	else
+		node = &vxlan->hlist6;
+	} else
 #endif
+	{
 		rcu_assign_pointer(vxlan->vn4_sock, vs);
-	vxlan_vs_add_dev(vs, vxlan);
+		node = &vxlan->hlist4;
+	}
+	vxlan_vs_add_dev(vs, vxlan, node);
 	return 0;
 }
 
include/net/vxlan.h
@@ -221,9 +221,17 @@ struct vxlan_config {
 	bool			no_share;
 };
 
+struct vxlan_dev_node {
+	struct hlist_node hlist;
+	struct vxlan_dev *vxlan;
+};
+
 /* Pseudo network device */
 struct vxlan_dev {
-	struct hlist_node hlist;	/* vni hash table */
+	struct vxlan_dev_node hlist4;	/* vni hash table for IPv4 socket */
+#if IS_ENABLED(CONFIG_IPV6)
+	struct vxlan_dev_node hlist6;	/* vni hash table for IPv6 socket */
+#endif
 	struct list_head  next;		/* vxlan's per namespace list */
 	struct vxlan_sock __rcu *vn4_sock;	/* listening socket for IPv4 */
 #if IS_ENABLED(CONFIG_IPV6)