2012-12-07 08:04:48 +08:00
|
|
|
#include <linux/err.h>
|
|
|
|
#include <linux/igmp.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/netdevice.h>
|
|
|
|
#include <linux/rculist.h>
|
|
|
|
#include <linux/skbuff.h>
|
2012-12-12 06:23:08 +08:00
|
|
|
#include <linux/if_ether.h>
|
2012-12-07 08:04:48 +08:00
|
|
|
#include <net/ip.h>
|
|
|
|
#include <net/netlink.h>
|
2016-01-11 04:06:23 +08:00
|
|
|
#include <net/switchdev.h>
|
2012-12-07 08:04:48 +08:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
|
|
#include <net/ipv6.h>
|
2013-09-04 08:13:39 +08:00
|
|
|
#include <net/addrconf.h>
|
2012-12-07 08:04:48 +08:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#include "br_private.h"
|
|
|
|
|
|
|
|
static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
|
|
|
|
struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct net_bridge *br = netdev_priv(dev);
|
|
|
|
struct net_bridge_port *p;
|
|
|
|
struct nlattr *nest;
|
|
|
|
|
|
|
|
if (!br->multicast_router || hlist_empty(&br->router_list))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
nest = nla_nest_start(skb, MDBA_ROUTER);
|
|
|
|
if (nest == NULL)
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
hlist: drop the node parameter from iterators
I'm not sure why, but the hlist for each entry iterators were conceived
list_for_each_entry(pos, head, member)
The hlist ones were greedy and wanted an extra parameter:
hlist_for_each_entry(tpos, pos, head, member)
Why did they need an extra pos parameter? I'm not quite sure. Not only
they don't really need it, it also prevents the iterator from looking
exactly like the list iterator, which is unfortunate.
Besides the semantic patch, there was some manual work required:
- Fix up the actual hlist iterators in linux/list.h
- Fix up the declaration of other iterators based on the hlist ones.
- A very small amount of places were using the 'node' parameter, this
was modified to use 'obj->member' instead.
- Coccinelle didn't handle the hlist_for_each_entry_safe iterator
properly, so those had to be fixed up manually.
The semantic patch which is mostly the work of Peter Senna Tschudin is here:
@@
iterator name hlist_for_each_entry, hlist_for_each_entry_continue, hlist_for_each_entry_from, hlist_for_each_entry_rcu, hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh, for_each_busy_worker, ax25_uid_for_each, ax25_for_each, inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each, sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound, hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu, nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each, nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp, for_each_host;
type T;
expression a,c,d,e;
identifier b;
statement S;
@@
-T b;
<+... when != b
(
hlist_for_each_entry(a,
- b,
c, d) S
|
hlist_for_each_entry_continue(a,
- b,
c) S
|
hlist_for_each_entry_from(a,
- b,
c) S
|
hlist_for_each_entry_rcu(a,
- b,
c, d) S
|
hlist_for_each_entry_rcu_bh(a,
- b,
c, d) S
|
hlist_for_each_entry_continue_rcu_bh(a,
- b,
c) S
|
for_each_busy_worker(a, c,
- b,
d) S
|
ax25_uid_for_each(a,
- b,
c) S
|
ax25_for_each(a,
- b,
c) S
|
inet_bind_bucket_for_each(a,
- b,
c) S
|
sctp_for_each_hentry(a,
- b,
c) S
|
sk_for_each(a,
- b,
c) S
|
sk_for_each_rcu(a,
- b,
c) S
|
sk_for_each_from
-(a, b)
+(a)
S
+ sk_for_each_from(a) S
|
sk_for_each_safe(a,
- b,
c, d) S
|
sk_for_each_bound(a,
- b,
c) S
|
hlist_for_each_entry_safe(a,
- b,
c, d, e) S
|
hlist_for_each_entry_continue_rcu(a,
- b,
c) S
|
nr_neigh_for_each(a,
- b,
c) S
|
nr_neigh_for_each_safe(a,
- b,
c, d) S
|
nr_node_for_each(a,
- b,
c) S
|
nr_node_for_each_safe(a,
- b,
c, d) S
|
- for_each_gfn_sp(a, c, d, b) S
+ for_each_gfn_sp(a, c, d) S
|
- for_each_gfn_indirect_valid_sp(a, c, d, b) S
+ for_each_gfn_indirect_valid_sp(a, c, d) S
|
for_each_host(a,
- b,
c) S
|
for_each_host_safe(a,
- b,
c, d) S
|
for_each_mesh_entry(a,
- b,
c, d) S
)
...+>
[akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c]
[akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: fix warnings]
[akpm@linux-foudnation.org: redo intrusive kvm changes]
Tested-by: Peter Senna Tschudin <peter.senna@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2013-02-28 09:06:00 +08:00
|
|
|
hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
|
2012-12-07 08:04:48 +08:00
|
|
|
if (p && nla_put_u32(skb, MDBA_ROUTER_PORT, p->dev->ifindex))
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
nla_nest_end(skb, nest);
|
|
|
|
return 0;
|
|
|
|
fail:
|
|
|
|
nla_nest_cancel(skb, nest);
|
|
|
|
return -EMSGSIZE;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
|
|
|
|
struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct net_bridge *br = netdev_priv(dev);
|
|
|
|
struct net_bridge_mdb_htable *mdb;
|
|
|
|
struct nlattr *nest, *nest2;
|
|
|
|
int i, err = 0;
|
|
|
|
int idx = 0, s_idx = cb->args[1];
|
|
|
|
|
|
|
|
if (br->multicast_disabled)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
mdb = rcu_dereference(br->mdb);
|
|
|
|
if (!mdb)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
nest = nla_nest_start(skb, MDBA_MDB);
|
|
|
|
if (nest == NULL)
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
|
|
|
for (i = 0; i < mdb->max; i++) {
|
|
|
|
struct net_bridge_mdb_entry *mp;
|
2013-08-05 08:19:38 +08:00
|
|
|
struct net_bridge_port_group *p;
|
|
|
|
struct net_bridge_port_group __rcu **pp;
|
2012-12-07 08:04:48 +08:00
|
|
|
struct net_bridge_port *port;
|
|
|
|
|
hlist: drop the node parameter from iterators
I'm not sure why, but the hlist for each entry iterators were conceived
list_for_each_entry(pos, head, member)
The hlist ones were greedy and wanted an extra parameter:
hlist_for_each_entry(tpos, pos, head, member)
Why did they need an extra pos parameter? I'm not quite sure. Not only
they don't really need it, it also prevents the iterator from looking
exactly like the list iterator, which is unfortunate.
Besides the semantic patch, there was some manual work required:
- Fix up the actual hlist iterators in linux/list.h
- Fix up the declaration of other iterators based on the hlist ones.
- A very small amount of places were using the 'node' parameter, this
was modified to use 'obj->member' instead.
- Coccinelle didn't handle the hlist_for_each_entry_safe iterator
properly, so those had to be fixed up manually.
The semantic patch which is mostly the work of Peter Senna Tschudin is here:
@@
iterator name hlist_for_each_entry, hlist_for_each_entry_continue, hlist_for_each_entry_from, hlist_for_each_entry_rcu, hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh, for_each_busy_worker, ax25_uid_for_each, ax25_for_each, inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each, sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound, hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu, nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each, nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp, for_each_host;
type T;
expression a,c,d,e;
identifier b;
statement S;
@@
-T b;
<+... when != b
(
hlist_for_each_entry(a,
- b,
c, d) S
|
hlist_for_each_entry_continue(a,
- b,
c) S
|
hlist_for_each_entry_from(a,
- b,
c) S
|
hlist_for_each_entry_rcu(a,
- b,
c, d) S
|
hlist_for_each_entry_rcu_bh(a,
- b,
c, d) S
|
hlist_for_each_entry_continue_rcu_bh(a,
- b,
c) S
|
for_each_busy_worker(a, c,
- b,
d) S
|
ax25_uid_for_each(a,
- b,
c) S
|
ax25_for_each(a,
- b,
c) S
|
inet_bind_bucket_for_each(a,
- b,
c) S
|
sctp_for_each_hentry(a,
- b,
c) S
|
sk_for_each(a,
- b,
c) S
|
sk_for_each_rcu(a,
- b,
c) S
|
sk_for_each_from
-(a, b)
+(a)
S
+ sk_for_each_from(a) S
|
sk_for_each_safe(a,
- b,
c, d) S
|
sk_for_each_bound(a,
- b,
c) S
|
hlist_for_each_entry_safe(a,
- b,
c, d, e) S
|
hlist_for_each_entry_continue_rcu(a,
- b,
c) S
|
nr_neigh_for_each(a,
- b,
c) S
|
nr_neigh_for_each_safe(a,
- b,
c, d) S
|
nr_node_for_each(a,
- b,
c) S
|
nr_node_for_each_safe(a,
- b,
c, d) S
|
- for_each_gfn_sp(a, c, d, b) S
+ for_each_gfn_sp(a, c, d) S
|
- for_each_gfn_indirect_valid_sp(a, c, d, b) S
+ for_each_gfn_indirect_valid_sp(a, c, d) S
|
for_each_host(a,
- b,
c) S
|
for_each_host_safe(a,
- b,
c, d) S
|
for_each_mesh_entry(a,
- b,
c, d) S
)
...+>
[akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c]
[akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: fix warnings]
[akpm@linux-foudnation.org: redo intrusive kvm changes]
Tested-by: Peter Senna Tschudin <peter.senna@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2013-02-28 09:06:00 +08:00
|
|
|
hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
|
2012-12-07 08:04:48 +08:00
|
|
|
if (idx < s_idx)
|
|
|
|
goto skip;
|
|
|
|
|
|
|
|
nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
|
|
|
|
if (nest2 == NULL) {
|
|
|
|
err = -EMSGSIZE;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (pp = &mp->ports;
|
|
|
|
(p = rcu_dereference(*pp)) != NULL;
|
|
|
|
pp = &p->next) {
|
|
|
|
port = p->port;
|
|
|
|
if (port) {
|
|
|
|
struct br_mdb_entry e;
|
2013-03-09 13:52:19 +08:00
|
|
|
memset(&e, 0, sizeof(e));
|
2012-12-07 08:04:48 +08:00
|
|
|
e.ifindex = port->dev->ifindex;
|
2012-12-15 06:09:51 +08:00
|
|
|
e.state = p->state;
|
2015-07-10 23:02:08 +08:00
|
|
|
e.vid = p->addr.vid;
|
2012-12-18 19:54:08 +08:00
|
|
|
if (p->addr.proto == htons(ETH_P_IP))
|
|
|
|
e.addr.u.ip4 = p->addr.u.ip4;
|
2012-12-07 08:04:48 +08:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
2012-12-18 19:54:08 +08:00
|
|
|
if (p->addr.proto == htons(ETH_P_IPV6))
|
|
|
|
e.addr.u.ip6 = p->addr.u.ip6;
|
2012-12-07 08:04:48 +08:00
|
|
|
#endif
|
|
|
|
e.addr.proto = p->addr.proto;
|
|
|
|
if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(e), &e)) {
|
|
|
|
nla_nest_cancel(skb, nest2);
|
|
|
|
err = -EMSGSIZE;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
nla_nest_end(skb, nest2);
|
|
|
|
skip:
|
|
|
|
idx++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
cb->args[1] = idx;
|
|
|
|
nla_nest_end(skb, nest);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
|
|
|
|
{
|
|
|
|
struct net_device *dev;
|
|
|
|
struct net *net = sock_net(skb->sk);
|
|
|
|
struct nlmsghdr *nlh = NULL;
|
|
|
|
int idx = 0, s_idx;
|
|
|
|
|
|
|
|
s_idx = cb->args[0];
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
|
2012-12-10 10:15:35 +08:00
|
|
|
/* In theory this could be wrapped to 0... */
|
|
|
|
cb->seq = net->dev_base_seq + br_mdb_rehash_seq;
|
2012-12-07 08:04:48 +08:00
|
|
|
|
|
|
|
for_each_netdev_rcu(net, dev) {
|
|
|
|
if (dev->priv_flags & IFF_EBRIDGE) {
|
|
|
|
struct br_port_msg *bpm;
|
|
|
|
|
|
|
|
if (idx < s_idx)
|
|
|
|
goto skip;
|
|
|
|
|
|
|
|
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
|
|
|
|
cb->nlh->nlmsg_seq, RTM_GETMDB,
|
|
|
|
sizeof(*bpm), NLM_F_MULTI);
|
|
|
|
if (nlh == NULL)
|
|
|
|
break;
|
|
|
|
|
|
|
|
bpm = nlmsg_data(nlh);
|
2013-03-09 13:52:19 +08:00
|
|
|
memset(bpm, 0, sizeof(*bpm));
|
2012-12-07 08:04:48 +08:00
|
|
|
bpm->ifindex = dev->ifindex;
|
|
|
|
if (br_mdb_fill_info(skb, cb, dev) < 0)
|
|
|
|
goto out;
|
|
|
|
if (br_rports_fill_info(skb, cb, dev) < 0)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
cb->args[1] = 0;
|
|
|
|
nlmsg_end(skb, nlh);
|
|
|
|
skip:
|
|
|
|
idx++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
if (nlh)
|
|
|
|
nlmsg_end(skb, nlh);
|
|
|
|
rcu_read_unlock();
|
|
|
|
cb->args[0] = idx;
|
|
|
|
return skb->len;
|
|
|
|
}
|
|
|
|
|
2012-12-12 06:23:07 +08:00
|
|
|
/* nlmsg_populate_mdb_fill - build a single RTM_NEWMDB/RTM_DELMDB
 * notification message for @entry into @skb.
 *
 * Layout: br_port_msg header, then MDBA_MDB > MDBA_MDB_ENTRY >
 * MDBA_MDB_ENTRY_INFO carrying the raw br_mdb_entry.
 *
 * Returns 0 on success, -EMSGSIZE on any space failure (the message is
 * cancelled via the goto ladder before returning).
 */
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct br_mdb_entry *entry, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	/* Zero the header so no uninitialized bytes reach userspace. */
	memset(bpm, 0, sizeof(*bpm));
	bpm->family  = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;

	nest = nla_nest_start(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
|
|
|
|
|
|
|
|
static inline size_t rtnl_mdb_nlmsg_size(void)
|
|
|
|
{
|
|
|
|
return NLMSG_ALIGN(sizeof(struct br_port_msg))
|
|
|
|
+ nla_total_size(sizeof(struct br_mdb_entry));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
|
|
|
|
int type)
|
|
|
|
{
|
2016-01-11 04:06:23 +08:00
|
|
|
struct switchdev_obj_port_mdb mdb = {
|
|
|
|
.obj = {
|
|
|
|
.id = SWITCHDEV_OBJ_ID_PORT_MDB,
|
|
|
|
.flags = SWITCHDEV_F_DEFER,
|
|
|
|
},
|
|
|
|
.vid = entry->vid,
|
|
|
|
};
|
|
|
|
struct net_device *port_dev;
|
2012-12-12 06:23:07 +08:00
|
|
|
struct net *net = dev_net(dev);
|
|
|
|
struct sk_buff *skb;
|
|
|
|
int err = -ENOBUFS;
|
|
|
|
|
2016-01-11 04:06:23 +08:00
|
|
|
port_dev = __dev_get_by_index(net, entry->ifindex);
|
|
|
|
if (entry->addr.proto == htons(ETH_P_IP))
|
|
|
|
ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
|
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
|
|
else
|
|
|
|
ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
mdb.obj.orig_dev = port_dev;
|
|
|
|
if (port_dev && type == RTM_NEWMDB)
|
|
|
|
switchdev_port_obj_add(port_dev, &mdb.obj);
|
|
|
|
else if (port_dev && type == RTM_DELMDB)
|
|
|
|
switchdev_port_obj_del(port_dev, &mdb.obj);
|
|
|
|
|
2012-12-12 06:23:07 +08:00
|
|
|
skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
|
|
|
|
if (!skb)
|
|
|
|
goto errout;
|
|
|
|
|
|
|
|
err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
|
|
|
|
if (err < 0) {
|
|
|
|
kfree_skb(skb);
|
|
|
|
goto errout;
|
|
|
|
}
|
|
|
|
|
|
|
|
rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
|
|
|
|
return;
|
|
|
|
errout:
|
|
|
|
rtnl_set_sk_err(net, RTNLGRP_MDB, err);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* br_mdb_notify - convenience wrapper: marshal (@port, @group, @state)
 * into a br_mdb_entry and emit the notification via __br_mdb_notify().
 */
void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
		   struct br_ip *group, int type, u8 state)
{
	struct br_mdb_entry entry;

	memset(&entry, 0, sizeof(entry));
	entry.ifindex = port->dev->ifindex;
	entry.addr.proto = group->proto;
	/* Both address family members are copied; proto selects which one
	 * the receiver interprets.
	 */
	entry.addr.u.ip4 = group->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	entry.addr.u.ip6 = group->u.ip6;
#endif
	entry.state = state;
	entry.vid = group->vid;
	__br_mdb_notify(dev, &entry, type);
}
|
|
|
|
|
2015-07-23 20:00:53 +08:00
|
|
|
/* nlmsg_populate_rtr_fill - build a router-port notification message:
 * br_port_msg header followed by an MDBA_ROUTER nest containing the
 * single MDBA_ROUTER_PORT ifindex.
 *
 * Returns 0 on success, -EMSGSIZE on any space failure.
 */
static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;

	nest = nla_nest_start(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
		goto end;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
|
|
|
|
|
|
|
|
static inline size_t rtnl_rtr_nlmsg_size(void)
|
|
|
|
{
|
|
|
|
return NLMSG_ALIGN(sizeof(struct br_port_msg))
|
|
|
|
+ nla_total_size(sizeof(__u32));
|
|
|
|
}
|
|
|
|
|
|
|
|
void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
|
|
|
|
int type)
|
|
|
|
{
|
|
|
|
struct net *net = dev_net(dev);
|
|
|
|
struct sk_buff *skb;
|
|
|
|
int err = -ENOBUFS;
|
|
|
|
int ifindex;
|
|
|
|
|
|
|
|
ifindex = port ? port->dev->ifindex : 0;
|
|
|
|
skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
|
|
|
|
if (!skb)
|
|
|
|
goto errout;
|
|
|
|
|
|
|
|
err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
|
|
|
|
if (err < 0) {
|
|
|
|
kfree_skb(skb);
|
|
|
|
goto errout;
|
|
|
|
}
|
|
|
|
|
|
|
|
rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
|
|
|
|
return;
|
|
|
|
|
|
|
|
errout:
|
|
|
|
rtnl_set_sk_err(net, RTNLGRP_MDB, err);
|
|
|
|
}
|
|
|
|
|
2012-12-12 06:23:08 +08:00
|
|
|
static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
|
|
|
|
{
|
|
|
|
if (entry->ifindex == 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (entry->addr.proto == htons(ETH_P_IP)) {
|
|
|
|
if (!ipv4_is_multicast(entry->addr.u.ip4))
|
|
|
|
return false;
|
|
|
|
if (ipv4_is_local_multicast(entry->addr.u.ip4))
|
|
|
|
return false;
|
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
|
|
} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
|
2013-09-04 08:13:39 +08:00
|
|
|
if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6))
|
2012-12-12 06:23:08 +08:00
|
|
|
return false;
|
|
|
|
#endif
|
|
|
|
} else
|
|
|
|
return false;
|
2012-12-15 06:09:51 +08:00
|
|
|
if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
|
|
|
|
return false;
|
2015-07-10 23:02:08 +08:00
|
|
|
if (entry->vid >= VLAN_VID_MASK)
|
|
|
|
return false;
|
2012-12-12 06:23:08 +08:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
|
|
|
|
struct net_device **pdev, struct br_mdb_entry **pentry)
|
|
|
|
{
|
|
|
|
struct net *net = sock_net(skb->sk);
|
|
|
|
struct br_mdb_entry *entry;
|
|
|
|
struct br_port_msg *bpm;
|
|
|
|
struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
|
|
|
|
struct net_device *dev;
|
|
|
|
int err;
|
|
|
|
|
2015-01-15 23:29:12 +08:00
|
|
|
err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX, NULL);
|
2012-12-12 06:23:08 +08:00
|
|
|
if (err < 0)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
bpm = nlmsg_data(nlh);
|
|
|
|
if (bpm->ifindex == 0) {
|
|
|
|
pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
dev = __dev_get_by_index(net, bpm->ifindex);
|
|
|
|
if (dev == NULL) {
|
|
|
|
pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!(dev->priv_flags & IFF_EBRIDGE)) {
|
|
|
|
pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
|
|
|
|
*pdev = dev;
|
|
|
|
|
|
|
|
if (!tb[MDBA_SET_ENTRY] ||
|
|
|
|
nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
|
|
|
|
pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
entry = nla_data(tb[MDBA_SET_ENTRY]);
|
|
|
|
if (!is_valid_mdb_entry(entry)) {
|
|
|
|
pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
*pentry = entry;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
|
2012-12-15 06:09:51 +08:00
|
|
|
struct br_ip *group, unsigned char state)
|
2012-12-12 06:23:08 +08:00
|
|
|
{
|
|
|
|
struct net_bridge_mdb_entry *mp;
|
|
|
|
struct net_bridge_port_group *p;
|
|
|
|
struct net_bridge_port_group __rcu **pp;
|
|
|
|
struct net_bridge_mdb_htable *mdb;
|
2015-07-06 20:53:35 +08:00
|
|
|
unsigned long now = jiffies;
|
2012-12-12 06:23:08 +08:00
|
|
|
int err;
|
|
|
|
|
|
|
|
mdb = mlock_dereference(br->mdb, br);
|
|
|
|
mp = br_mdb_ip_get(mdb, group);
|
|
|
|
if (!mp) {
|
|
|
|
mp = br_multicast_new_group(br, port, group);
|
|
|
|
err = PTR_ERR(mp);
|
|
|
|
if (IS_ERR(mp))
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (pp = &mp->ports;
|
|
|
|
(p = mlock_dereference(*pp, br)) != NULL;
|
|
|
|
pp = &p->next) {
|
|
|
|
if (p->port == port)
|
|
|
|
return -EEXIST;
|
|
|
|
if ((unsigned long)p->port < (unsigned long)port)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2012-12-15 06:09:51 +08:00
|
|
|
p = br_multicast_new_port_group(port, group, *pp, state);
|
2012-12-12 06:23:08 +08:00
|
|
|
if (unlikely(!p))
|
|
|
|
return -ENOMEM;
|
|
|
|
rcu_assign_pointer(*pp, p);
|
2015-07-06 20:53:35 +08:00
|
|
|
if (state == MDB_TEMPORARY)
|
|
|
|
mod_timer(&p->timer, now + br->multicast_membership_interval);
|
2012-12-12 06:23:08 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int __br_mdb_add(struct net *net, struct net_bridge *br,
|
|
|
|
struct br_mdb_entry *entry)
|
|
|
|
{
|
|
|
|
struct br_ip ip;
|
|
|
|
struct net_device *dev;
|
|
|
|
struct net_bridge_port *p;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!netif_running(br->dev) || br->multicast_disabled)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
dev = __dev_get_by_index(net, entry->ifindex);
|
|
|
|
if (!dev)
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
p = br_port_get_rtnl(dev);
|
|
|
|
if (!p || p->br != br || p->state == BR_STATE_DISABLED)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2015-07-07 21:55:56 +08:00
|
|
|
memset(&ip, 0, sizeof(ip));
|
2015-07-10 23:02:08 +08:00
|
|
|
ip.vid = entry->vid;
|
2012-12-12 06:23:08 +08:00
|
|
|
ip.proto = entry->addr.proto;
|
|
|
|
if (ip.proto == htons(ETH_P_IP))
|
|
|
|
ip.u.ip4 = entry->addr.u.ip4;
|
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
|
|
else
|
|
|
|
ip.u.ip6 = entry->addr.u.ip6;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
spin_lock_bh(&br->multicast_lock);
|
2012-12-15 06:09:51 +08:00
|
|
|
ret = br_mdb_add_group(br, p, &ip, entry->state);
|
2012-12-12 06:23:08 +08:00
|
|
|
spin_unlock_bh(&br->multicast_lock);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2013-03-21 15:45:29 +08:00
|
|
|
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
|
2012-12-12 06:23:08 +08:00
|
|
|
{
|
|
|
|
struct net *net = sock_net(skb->sk);
|
bridge: vlan: add per-vlan struct and move to rhashtables
This patch changes the bridge vlan implementation to use rhashtables
instead of bitmaps. The main motivation behind this change is that we
need extensible per-vlan structures (both per-port and global) so more
advanced features can be introduced and the vlan support can be
extended. I've tried to break this up but the moment net_port_vlans is
changed and the whole API goes away, thus this is a larger patch.
A few short goals of this patch are:
- Extensible per-vlan structs stored in rhashtables and a sorted list
- Keep user-visible behaviour (compressed vlans etc)
- Keep fastpath ingress/egress logic the same (optimizations to come
later)
Here's a brief list of some of the new features we'd like to introduce:
- per-vlan counters
- vlan ingress/egress mapping
- per-vlan igmp configuration
- vlan priorities
- avoid fdb entries replication (e.g. local fdb scaling issues)
The structure is kept single for both global and per-port entries so to
avoid code duplication where possible and also because we'll soon introduce
"port0 / aka bridge as port" which should simplify things further
(thanks to Vlad for the suggestion!).
Now we have per-vlan global rhashtable (bridge-wide) and per-vlan port
rhashtable, if an entry is added to a port it'll get a pointer to its
global context so it can be quickly accessed later. There's also a
sorted vlan list which is used for stable walks and some user-visible
behaviour such as the vlan ranges, also for error paths.
VLANs are stored in a "vlan group" which currently contains the
rhashtable, sorted vlan list and the number of "real" vlan entries.
A good side-effect of this change is that it resembles how hw keeps
per-vlan data.
One important note after this change is that if a VLAN is being looked up
in the bridge's rhashtable for filtering purposes (or to check if it's an
existing usable entry, not just a global context) then the new helper
br_vlan_should_use() needs to be used if the vlan is found. In case the
lookup is done only with a port's vlan group, then this check can be
skipped.
Things tested so far:
- basic vlan ingress/egress
- pvids
- untagged vlans
- undef CONFIG_BRIDGE_VLAN_FILTERING
- adding/deleting vlans in different scenarios (with/without global ctx,
while transmitting traffic, in ranges etc)
- loading/removing the module while having/adding/deleting vlans
- extracting bridge vlan information (user ABI), compressed requests
- adding/deleting fdbs on vlans
- bridge mac change, promisc mode
- default pvid change
- kmemleak ON during the whole time
Signed-off-by: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2015-09-26 01:00:11 +08:00
|
|
|
struct net_bridge_vlan_group *vg;
|
2015-08-03 19:29:16 +08:00
|
|
|
struct net_device *dev, *pdev;
|
2012-12-12 06:23:08 +08:00
|
|
|
struct br_mdb_entry *entry;
|
2015-08-03 19:29:16 +08:00
|
|
|
struct net_bridge_port *p;
|
bridge: vlan: add per-vlan struct and move to rhashtables
This patch changes the bridge vlan implementation to use rhashtables
instead of bitmaps. The main motivation behind this change is that we
need extensible per-vlan structures (both per-port and global) so more
advanced features can be introduced and the vlan support can be
extended. I've tried to break this up but the moment net_port_vlans is
changed and the whole API goes away, thus this is a larger patch.
A few short goals of this patch are:
- Extensible per-vlan structs stored in rhashtables and a sorted list
- Keep user-visible behaviour (compressed vlans etc)
- Keep fastpath ingress/egress logic the same (optimizations to come
later)
Here's a brief list of some of the new features we'd like to introduce:
- per-vlan counters
- vlan ingress/egress mapping
- per-vlan igmp configuration
- vlan priorities
- avoid fdb entries replication (e.g. local fdb scaling issues)
The structure is kept single for both global and per-port entries so to
avoid code duplication where possible and also because we'll soon introduce
"port0 / aka bridge as port" which should simplify things further
(thanks to Vlad for the suggestion!).
Now we have per-vlan global rhashtable (bridge-wide) and per-vlan port
rhashtable, if an entry is added to a port it'll get a pointer to its
global context so it can be quickly accessed later. There's also a
sorted vlan list which is used for stable walks and some user-visible
behaviour such as the vlan ranges, also for error paths.
VLANs are stored in a "vlan group" which currently contains the
rhashtable, sorted vlan list and the number of "real" vlan entries.
A good side-effect of this change is that it resembles how hw keeps
per-vlan data.
One important note after this change is that if a VLAN is being looked up
in the bridge's rhashtable for filtering purposes (or to check if it's an
existing usable entry, not just a global context) then the new helper
br_vlan_should_use() needs to be used if the vlan is found. In case the
lookup is done only with a port's vlan group, then this check can be
skipped.
Things tested so far:
- basic vlan ingress/egress
- pvids
- untagged vlans
- undef CONFIG_BRIDGE_VLAN_FILTERING
- adding/deleting vlans in different scenarios (with/without global ctx,
while transmitting traffic, in ranges etc)
- loading/removing the module while having/adding/deleting vlans
- extracting bridge vlan information (user ABI), compressed requests
- adding/deleting fdbs on vlans
- bridge mac change, promisc mode
- default pvid change
- kmemleak ON during the whole time
Signed-off-by: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2015-09-26 01:00:11 +08:00
|
|
|
struct net_bridge_vlan *v;
|
2012-12-12 06:23:08 +08:00
|
|
|
struct net_bridge *br;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = br_mdb_parse(skb, nlh, &dev, &entry);
|
|
|
|
if (err < 0)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
br = netdev_priv(dev);
|
|
|
|
|
2015-08-03 19:29:16 +08:00
|
|
|
/* If vlan filtering is enabled and VLAN is not specified
|
|
|
|
* install mdb entry on all vlans configured on the port.
|
|
|
|
*/
|
|
|
|
pdev = __dev_get_by_index(net, entry->ifindex);
|
|
|
|
if (!pdev)
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
p = br_port_get_rtnl(pdev);
|
|
|
|
if (!p || p->br != br || p->state == BR_STATE_DISABLED)
|
|
|
|
return -EINVAL;
|
|
|
|
|
bridge: vlan: add per-vlan struct and move to rhashtables
This patch changes the bridge vlan implementation to use rhashtables
instead of bitmaps. The main motivation behind this change is that we
need extensible per-vlan structures (both per-port and global) so more
advanced features can be introduced and the vlan support can be
extended. I've tried to break this up but the moment net_port_vlans is
changed and the whole API goes away, thus this is a larger patch.
A few short goals of this patch are:
- Extensible per-vlan structs stored in rhashtables and a sorted list
- Keep user-visible behaviour (compressed vlans etc)
- Keep fastpath ingress/egress logic the same (optimizations to come
later)
Here's a brief list of some of the new features we'd like to introduce:
- per-vlan counters
- vlan ingress/egress mapping
- per-vlan igmp configuration
- vlan priorities
- avoid fdb entries replication (e.g. local fdb scaling issues)
The structure is kept single for both global and per-port entries so to
avoid code duplication where possible and also because we'll soon introduce
"port0 / aka bridge as port" which should simplify things further
(thanks to Vlad for the suggestion!).
Now we have per-vlan global rhashtable (bridge-wide) and per-vlan port
rhashtable, if an entry is added to a port it'll get a pointer to its
global context so it can be quickly accessed later. There's also a
sorted vlan list which is used for stable walks and some user-visible
behaviour such as the vlan ranges, also for error paths.
VLANs are stored in a "vlan group" which currently contains the
rhashtable, sorted vlan list and the number of "real" vlan entries.
A good side-effect of this change is that it resembles how hw keeps
per-vlan data.
One important note after this change is that if a VLAN is being looked up
in the bridge's rhashtable for filtering purposes (or to check if it's an
existing usable entry, not just a global context) then the new helper
br_vlan_should_use() needs to be used if the vlan is found. In case the
lookup is done only with a port's vlan group, then this check can be
skipped.
Things tested so far:
- basic vlan ingress/egress
- pvids
- untagged vlans
- undef CONFIG_BRIDGE_VLAN_FILTERING
- adding/deleting vlans in different scenarios (with/without global ctx,
while transmitting traffic, in ranges etc)
- loading/removing the module while having/adding/deleting vlans
- extracting bridge vlan information (user ABI), compressed requests
- adding/deleting fdbs on vlans
- bridge mac change, promisc mode
- default pvid change
- kmemleak ON during the whole time
Signed-off-by: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2015-09-26 01:00:11 +08:00
|
|
|
vg = nbp_vlan_group(p);
|
|
|
|
if (br_vlan_enabled(br) && vg && entry->vid == 0) {
|
|
|
|
list_for_each_entry(v, &vg->vlan_list, vlist) {
|
|
|
|
entry->vid = v->vid;
|
2015-08-03 19:29:16 +08:00
|
|
|
err = __br_mdb_add(net, br, entry);
|
|
|
|
if (err)
|
|
|
|
break;
|
|
|
|
__br_mdb_notify(dev, entry, RTM_NEWMDB);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
err = __br_mdb_add(net, br, entry);
|
|
|
|
if (!err)
|
|
|
|
__br_mdb_notify(dev, entry, RTM_NEWMDB);
|
|
|
|
}
|
|
|
|
|
2012-12-12 06:23:08 +08:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Remove the port-group entry matching @entry (group address, vid, port
 * ifindex) from the bridge's multicast database.
 *
 * Returns 0 on success, -EINVAL if the bridge is down, multicast snooping
 * is disabled, the group/port pair is not found, or the port is in
 * BR_STATE_DISABLED.  Called with RTNL held; takes br->multicast_lock.
 * On success, entry->state is updated to the deleted group's state so the
 * caller can include it in the RTM_DELMDB notification.
 */
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || br->multicast_disabled)
		return -EINVAL;

	/* Build the hash key (vid + protocol + group address) from the
	 * user-supplied entry.
	 */
	memset(&ip, 0, sizeof(ip));
	ip.vid = entry->vid;
	ip.proto = entry->addr.proto;
	if (ip.proto == htons(ETH_P_IP))
		ip.u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else
		ip.u.ip6 = entry->addr.u.ip6;
#endif

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &ip);
	if (!mp)
		goto unlock;

	/* Walk the group's port list looking for the requested port. */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->port || p->port->dev->ifindex != entry->ifindex)
			continue;

		if (p->port->state == BR_STATE_DISABLED)
			goto unlock;

		/* Report the deleted entry's state back to the caller for
		 * the netlink notification.
		 */
		entry->state = p->state;
		/* Unlink first (RCU-safe for concurrent readers), then free
		 * after a grace period.
		 */
		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);
		err = 0;

		/* If this was the last port and the bridge itself is not a
		 * member, arm the group timer to expire the mdb entry now.
		 */
		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}
|
|
|
|
|
2013-03-21 15:45:29 +08:00
|
|
|
/* RTM_DELMDB netlink handler: parse the request and delete the mdb entry,
 * notifying userspace for each successful deletion.
 *
 * If vlan filtering is enabled and no vid was given (entry->vid == 0),
 * the deletion is attempted on every vlan configured on the port; note
 * the returned err is then that of the last vlan attempted.  Runs under
 * RTNL (rtnetlink doit handler).
 */
static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_port *p;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	/* Validates the message and points dev at the bridge and entry at
	 * the user-supplied mdb entry inside the skb.
	 */
	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	pdev = __dev_get_by_index(net, entry->ifindex);
	if (!pdev)
		return -ENODEV;

	p = br_port_get_rtnl(pdev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	vg = nbp_vlan_group(p);
	if (br_vlan_enabled(br) && vg && entry->vid == 0) {
		/* Wildcard vid: iterate the port's vlan list, deleting the
		 * group on each vlan and notifying per deletion.  Failures
		 * on individual vlans do not stop the loop.
		 */
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry);
			if (!err)
				__br_mdb_notify(dev, entry, RTM_DELMDB);
		}
	} else {
		err = __br_mdb_del(br, entry);
		if (!err)
			__br_mdb_notify(dev, entry, RTM_DELMDB);
	}

	return err;
}
|
|
|
|
|
2012-12-07 08:04:48 +08:00
|
|
|
/* Register the PF_BRIDGE rtnetlink handlers for multicast database
 * get/add/delete.  Called once at bridge module init; paired with
 * br_mdb_uninit().
 */
void br_mdb_init(void)
{
	/* GETMDB is dump-only; NEWMDB/DELMDB are doit-only. */
	rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, NULL);
	rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, NULL);
	rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, NULL);
}
|
2012-12-19 17:13:48 +08:00
|
|
|
|
|
|
|
/* Unregister the PF_BRIDGE mdb rtnetlink handlers registered by
 * br_mdb_init().  Called at bridge module unload.
 */
void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}
|