2019-05-27 14:55:01 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2007-07-15 09:55:06 +08:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2007 Patrick McHardy <kaber@trash.net>
|
|
|
|
*
|
|
|
|
* The code this is based on carried the following copyright notice:
|
|
|
|
* ---
|
|
|
|
* (C) Copyright 2001-2006
|
|
|
|
* Alex Zeffertt, Cambridge Broadband Ltd, ajz@cambridgebroadband.com
|
|
|
|
* Re-worked by Ben Greear <greearb@candelatech.com>
|
|
|
|
* ---
|
|
|
|
*/
|
|
|
|
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/rculist.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/if_link.h>
#include <linux/if_macvlan.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <net/rtnetlink.h>
#include <net/xfrm.h>
#include <linux/netpoll.h>
#include <linux/phy.h>
|
2007-07-15 09:55:06 +08:00
|
|
|
|
2014-09-25 22:31:08 +08:00
|
|
|
#define MACVLAN_HASH_BITS 8
|
|
|
|
#define MACVLAN_HASH_SIZE (1<<MACVLAN_HASH_BITS)
|
2014-09-17 16:08:08 +08:00
|
|
|
#define MACVLAN_BC_QUEUE_LEN 1000
|
2007-07-15 09:55:06 +08:00
|
|
|
|
2017-06-21 19:59:18 +08:00
|
|
|
#define MACVLAN_F_PASSTHRU 1
|
2017-06-21 19:59:19 +08:00
|
|
|
#define MACVLAN_F_ADDRCHANGE 2
|
2017-06-21 19:59:18 +08:00
|
|
|
|
2007-07-15 09:55:06 +08:00
|
|
|
struct macvlan_port {
|
|
|
|
struct net_device *dev;
|
|
|
|
struct hlist_head vlan_hash[MACVLAN_HASH_SIZE];
|
|
|
|
struct list_head vlans;
|
2014-04-17 13:45:59 +08:00
|
|
|
struct sk_buff_head bc_queue;
|
|
|
|
struct work_struct bc_work;
|
2017-06-21 19:59:18 +08:00
|
|
|
u32 flags;
|
2014-08-15 05:32:49 +08:00
|
|
|
int count;
|
2014-09-25 22:31:08 +08:00
|
|
|
struct hlist_head vlan_source_hash[MACVLAN_HASH_SIZE];
|
2016-06-01 11:45:44 +08:00
|
|
|
DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ);
|
2017-06-21 19:59:19 +08:00
|
|
|
unsigned char perm_addr[ETH_ALEN];
|
2014-09-25 22:31:08 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
struct macvlan_source_entry {
|
|
|
|
struct hlist_node hlist;
|
|
|
|
struct macvlan_dev *vlan;
|
|
|
|
unsigned char addr[6+2] __aligned(sizeof(u16));
|
|
|
|
struct rcu_head rcu;
|
2007-07-15 09:55:06 +08:00
|
|
|
};
|
|
|
|
|
2014-04-17 13:45:59 +08:00
|
|
|
struct macvlan_skb_cb {
|
|
|
|
const struct macvlan_dev *src;
|
|
|
|
};
|
|
|
|
|
|
|
|
#define MACVLAN_SKB_CB(__skb) ((struct macvlan_skb_cb *)&((__skb)->cb[0]))
|
|
|
|
|
2011-03-22 09:22:22 +08:00
|
|
|
static void macvlan_port_destroy(struct net_device *dev);
|
|
|
|
|
2017-06-21 19:59:18 +08:00
|
|
|
static inline bool macvlan_passthru(const struct macvlan_port *port)
|
|
|
|
{
|
|
|
|
return port->flags & MACVLAN_F_PASSTHRU;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void macvlan_set_passthru(struct macvlan_port *port)
|
|
|
|
{
|
|
|
|
port->flags |= MACVLAN_F_PASSTHRU;
|
|
|
|
}
|
|
|
|
|
2017-06-21 19:59:19 +08:00
|
|
|
static inline bool macvlan_addr_change(const struct macvlan_port *port)
|
|
|
|
{
|
|
|
|
return port->flags & MACVLAN_F_ADDRCHANGE;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void macvlan_set_addr_change(struct macvlan_port *port)
|
|
|
|
{
|
|
|
|
port->flags |= MACVLAN_F_ADDRCHANGE;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void macvlan_clear_addr_change(struct macvlan_port *port)
|
|
|
|
{
|
|
|
|
port->flags &= ~MACVLAN_F_ADDRCHANGE;
|
|
|
|
}
|
|
|
|
|
2014-09-25 22:31:08 +08:00
|
|
|
/* Hash Ethernet address */
|
|
|
|
static u32 macvlan_eth_hash(const unsigned char *addr)
|
|
|
|
{
|
|
|
|
u64 value = get_unaligned((u64 *)addr);
|
|
|
|
|
|
|
|
/* only want 6 bytes */
|
|
|
|
#ifdef __BIG_ENDIAN
|
|
|
|
value >>= 16;
|
|
|
|
#else
|
|
|
|
value <<= 16;
|
|
|
|
#endif
|
|
|
|
return hash_64(value, MACVLAN_HASH_BITS);
|
|
|
|
}
|
|
|
|
|
2013-03-30 18:08:44 +08:00
|
|
|
static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev)
|
|
|
|
{
|
|
|
|
return rcu_dereference(dev->rx_handler_data);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct macvlan_port *macvlan_port_get_rtnl(const struct net_device *dev)
|
|
|
|
{
|
|
|
|
return rtnl_dereference(dev->rx_handler_data);
|
|
|
|
}
|
|
|
|
|
2007-07-15 09:55:06 +08:00
|
|
|
static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
|
|
|
|
const unsigned char *addr)
|
|
|
|
{
|
|
|
|
struct macvlan_dev *vlan;
|
2014-09-25 22:31:08 +08:00
|
|
|
u32 idx = macvlan_eth_hash(addr);
|
2007-07-15 09:55:06 +08:00
|
|
|
|
2014-09-25 22:31:08 +08:00
|
|
|
hlist_for_each_entry_rcu(vlan, &port->vlan_hash[idx], hlist) {
|
net, drivers/net: Convert compare_ether_addr_64bits to ether_addr_equal_64bits
Use the new bool function ether_addr_equal_64bits to add
some clarity and reduce the likelihood for misuse of
compare_ether_addr_64bits for sorting.
Done via cocci script:
$ cat compare_ether_addr_64bits.cocci
@@
expression a,b;
@@
- !compare_ether_addr_64bits(a, b)
+ ether_addr_equal_64bits(a, b)
@@
expression a,b;
@@
- compare_ether_addr_64bits(a, b)
+ !ether_addr_equal_64bits(a, b)
@@
expression a,b;
@@
- !ether_addr_equal_64bits(a, b) == 0
+ ether_addr_equal_64bits(a, b)
@@
expression a,b;
@@
- !ether_addr_equal_64bits(a, b) != 0
+ !ether_addr_equal_64bits(a, b)
@@
expression a,b;
@@
- ether_addr_equal_64bits(a, b) == 0
+ !ether_addr_equal_64bits(a, b)
@@
expression a,b;
@@
- ether_addr_equal_64bits(a, b) != 0
+ ether_addr_equal_64bits(a, b)
@@
expression a,b;
@@
- !!ether_addr_equal_64bits(a, b)
+ ether_addr_equal_64bits(a, b)
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2012-05-10 01:04:04 +08:00
|
|
|
if (ether_addr_equal_64bits(vlan->dev->dev_addr, addr))
|
2007-07-15 09:55:06 +08:00
|
|
|
return vlan;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2014-09-25 22:31:08 +08:00
|
|
|
static struct macvlan_source_entry *macvlan_hash_lookup_source(
|
|
|
|
const struct macvlan_dev *vlan,
|
|
|
|
const unsigned char *addr)
|
|
|
|
{
|
|
|
|
struct macvlan_source_entry *entry;
|
|
|
|
u32 idx = macvlan_eth_hash(addr);
|
|
|
|
struct hlist_head *h = &vlan->port->vlan_source_hash[idx];
|
|
|
|
|
|
|
|
hlist_for_each_entry_rcu(entry, h, hlist) {
|
|
|
|
if (ether_addr_equal_64bits(entry->addr, addr) &&
|
|
|
|
entry->vlan == vlan)
|
|
|
|
return entry;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int macvlan_hash_add_source(struct macvlan_dev *vlan,
|
|
|
|
const unsigned char *addr)
|
|
|
|
{
|
|
|
|
struct macvlan_port *port = vlan->port;
|
|
|
|
struct macvlan_source_entry *entry;
|
|
|
|
struct hlist_head *h;
|
|
|
|
|
|
|
|
entry = macvlan_hash_lookup_source(vlan, addr);
|
|
|
|
if (entry)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
|
|
|
|
if (!entry)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
ether_addr_copy(entry->addr, addr);
|
|
|
|
entry->vlan = vlan;
|
|
|
|
h = &port->vlan_source_hash[macvlan_eth_hash(addr)];
|
|
|
|
hlist_add_head_rcu(&entry->hlist, h);
|
|
|
|
vlan->macaddr_count++;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-03-14 04:16:13 +08:00
|
|
|
static void macvlan_hash_add(struct macvlan_dev *vlan)
|
|
|
|
{
|
|
|
|
struct macvlan_port *port = vlan->port;
|
|
|
|
const unsigned char *addr = vlan->dev->dev_addr;
|
2014-09-25 22:31:08 +08:00
|
|
|
u32 idx = macvlan_eth_hash(addr);
|
2009-03-14 04:16:13 +08:00
|
|
|
|
2014-09-25 22:31:08 +08:00
|
|
|
hlist_add_head_rcu(&vlan->hlist, &port->vlan_hash[idx]);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void macvlan_hash_del_source(struct macvlan_source_entry *entry)
|
|
|
|
{
|
|
|
|
hlist_del_rcu(&entry->hlist);
|
|
|
|
kfree_rcu(entry, rcu);
|
2009-03-14 04:16:13 +08:00
|
|
|
}
|
|
|
|
|
2011-05-19 20:24:16 +08:00
|
|
|
static void macvlan_hash_del(struct macvlan_dev *vlan, bool sync)
|
2009-03-14 04:16:13 +08:00
|
|
|
{
|
|
|
|
hlist_del_rcu(&vlan->hlist);
|
2011-05-19 20:24:16 +08:00
|
|
|
if (sync)
|
|
|
|
synchronize_rcu();
|
2009-03-14 04:16:13 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void macvlan_hash_change_addr(struct macvlan_dev *vlan,
|
|
|
|
const unsigned char *addr)
|
|
|
|
{
|
2011-05-19 20:24:16 +08:00
|
|
|
macvlan_hash_del(vlan, true);
|
2009-03-14 04:16:13 +08:00
|
|
|
/* Now that we are unhashed it is safe to change the device
|
|
|
|
* address without confusing packet delivery.
|
|
|
|
*/
|
|
|
|
memcpy(vlan->dev->dev_addr, addr, ETH_ALEN);
|
|
|
|
macvlan_hash_add(vlan);
|
|
|
|
}
|
|
|
|
|
2016-11-14 08:24:19 +08:00
|
|
|
static bool macvlan_addr_busy(const struct macvlan_port *port,
|
|
|
|
const unsigned char *addr)
|
2009-03-14 04:16:13 +08:00
|
|
|
{
|
2017-06-21 19:59:19 +08:00
|
|
|
/* Test to see if the specified address is
|
2009-03-14 04:16:13 +08:00
|
|
|
* currently in use by the underlying device or
|
|
|
|
* another macvlan.
|
|
|
|
*/
|
2017-06-21 19:59:19 +08:00
|
|
|
if (!macvlan_passthru(port) && !macvlan_addr_change(port) &&
|
2017-06-21 19:59:17 +08:00
|
|
|
ether_addr_equal_64bits(port->dev->dev_addr, addr))
|
2016-11-14 08:24:19 +08:00
|
|
|
return true;
|
2009-03-14 04:16:13 +08:00
|
|
|
|
|
|
|
if (macvlan_hash_lookup(port, addr))
|
2016-11-14 08:24:19 +08:00
|
|
|
return true;
|
2009-03-14 04:16:13 +08:00
|
|
|
|
2016-11-14 08:24:19 +08:00
|
|
|
return false;
|
2009-03-14 04:16:13 +08:00
|
|
|
}
|
|
|
|
|
2009-11-26 14:07:09 +08:00
|
|
|
|
2010-01-30 20:23:40 +08:00
|
|
|
static int macvlan_broadcast_one(struct sk_buff *skb,
|
|
|
|
const struct macvlan_dev *vlan,
|
2009-11-26 14:07:10 +08:00
|
|
|
const struct ethhdr *eth, bool local)
|
2009-11-26 14:07:09 +08:00
|
|
|
{
|
2010-01-30 20:23:40 +08:00
|
|
|
struct net_device *dev = vlan->dev;
|
2009-11-26 14:07:09 +08:00
|
|
|
|
2009-11-26 14:07:10 +08:00
|
|
|
if (local)
|
2014-04-17 13:45:59 +08:00
|
|
|
return __dev_forward_skb(dev, skb);
|
2009-11-26 14:07:10 +08:00
|
|
|
|
2009-11-26 14:07:09 +08:00
|
|
|
skb->dev = dev;
|
net, drivers/net: Convert compare_ether_addr_64bits to ether_addr_equal_64bits
Use the new bool function ether_addr_equal_64bits to add
some clarity and reduce the likelihood for misuse of
compare_ether_addr_64bits for sorting.
Done via cocci script:
$ cat compare_ether_addr_64bits.cocci
@@
expression a,b;
@@
- !compare_ether_addr_64bits(a, b)
+ ether_addr_equal_64bits(a, b)
@@
expression a,b;
@@
- compare_ether_addr_64bits(a, b)
+ !ether_addr_equal_64bits(a, b)
@@
expression a,b;
@@
- !ether_addr_equal_64bits(a, b) == 0
+ ether_addr_equal_64bits(a, b)
@@
expression a,b;
@@
- !ether_addr_equal_64bits(a, b) != 0
+ !ether_addr_equal_64bits(a, b)
@@
expression a,b;
@@
- ether_addr_equal_64bits(a, b) == 0
+ !ether_addr_equal_64bits(a, b)
@@
expression a,b;
@@
- ether_addr_equal_64bits(a, b) != 0
+ ether_addr_equal_64bits(a, b)
@@
expression a,b;
@@
- !!ether_addr_equal_64bits(a, b)
+ ether_addr_equal_64bits(a, b)
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2012-05-10 01:04:04 +08:00
|
|
|
if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
|
2009-11-26 14:07:09 +08:00
|
|
|
skb->pkt_type = PACKET_BROADCAST;
|
|
|
|
else
|
|
|
|
skb->pkt_type = PACKET_MULTICAST;
|
|
|
|
|
2014-04-17 13:45:59 +08:00
|
|
|
return 0;
|
2009-11-26 14:07:09 +08:00
|
|
|
}
|
|
|
|
|
2013-02-08 00:41:02 +08:00
|
|
|
static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
|
|
|
|
{
|
|
|
|
return (u32)(((unsigned long)vlan) >> L1_CACHE_SHIFT);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static unsigned int mc_hash(const struct macvlan_dev *vlan,
|
|
|
|
const unsigned char *addr)
|
2013-02-06 04:22:50 +08:00
|
|
|
{
|
|
|
|
u32 val = __get_unaligned_cpu32(addr + 2);
|
|
|
|
|
2013-02-08 00:41:02 +08:00
|
|
|
val ^= macvlan_hash_mix(vlan);
|
2013-02-06 04:22:50 +08:00
|
|
|
return hash_32(val, MACVLAN_MC_FILTER_BITS);
|
|
|
|
}
|
|
|
|
|
2007-07-15 09:55:06 +08:00
|
|
|
static void macvlan_broadcast(struct sk_buff *skb,
|
2009-11-26 14:07:10 +08:00
|
|
|
const struct macvlan_port *port,
|
|
|
|
struct net_device *src,
|
|
|
|
enum macvlan_mode mode)
|
2007-07-15 09:55:06 +08:00
|
|
|
{
|
|
|
|
const struct ethhdr *eth = eth_hdr(skb);
|
|
|
|
const struct macvlan_dev *vlan;
|
|
|
|
struct sk_buff *nskb;
|
|
|
|
unsigned int i;
|
2009-11-26 14:07:09 +08:00
|
|
|
int err;
|
2013-02-08 00:41:02 +08:00
|
|
|
unsigned int hash;
|
2007-07-15 09:55:06 +08:00
|
|
|
|
2008-11-27 07:30:48 +08:00
|
|
|
if (skb->protocol == htons(ETH_P_PAUSE))
|
|
|
|
return;
|
|
|
|
|
2007-07-15 09:55:06 +08:00
|
|
|
for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
|
hlist: drop the node parameter from iterators
I'm not sure why, but the hlist for each entry iterators were conceived
list_for_each_entry(pos, head, member)
The hlist ones were greedy and wanted an extra parameter:
hlist_for_each_entry(tpos, pos, head, member)
Why did they need an extra pos parameter? I'm not quite sure. Not only
they don't really need it, it also prevents the iterator from looking
exactly like the list iterator, which is unfortunate.
Besides the semantic patch, there was some manual work required:
- Fix up the actual hlist iterators in linux/list.h
- Fix up the declaration of other iterators based on the hlist ones.
- A very small amount of places were using the 'node' parameter, this
was modified to use 'obj->member' instead.
- Coccinelle didn't handle the hlist_for_each_entry_safe iterator
properly, so those had to be fixed up manually.
The semantic patch which is mostly the work of Peter Senna Tschudin is here:
@@
iterator name hlist_for_each_entry, hlist_for_each_entry_continue, hlist_for_each_entry_from, hlist_for_each_entry_rcu, hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh, for_each_busy_worker, ax25_uid_for_each, ax25_for_each, inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each, sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound, hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu, nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each, nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp, for_each_host;
type T;
expression a,c,d,e;
identifier b;
statement S;
@@
-T b;
<+... when != b
(
hlist_for_each_entry(a,
- b,
c, d) S
|
hlist_for_each_entry_continue(a,
- b,
c) S
|
hlist_for_each_entry_from(a,
- b,
c) S
|
hlist_for_each_entry_rcu(a,
- b,
c, d) S
|
hlist_for_each_entry_rcu_bh(a,
- b,
c, d) S
|
hlist_for_each_entry_continue_rcu_bh(a,
- b,
c) S
|
for_each_busy_worker(a, c,
- b,
d) S
|
ax25_uid_for_each(a,
- b,
c) S
|
ax25_for_each(a,
- b,
c) S
|
inet_bind_bucket_for_each(a,
- b,
c) S
|
sctp_for_each_hentry(a,
- b,
c) S
|
sk_for_each(a,
- b,
c) S
|
sk_for_each_rcu(a,
- b,
c) S
|
sk_for_each_from
-(a, b)
+(a)
S
+ sk_for_each_from(a) S
|
sk_for_each_safe(a,
- b,
c, d) S
|
sk_for_each_bound(a,
- b,
c) S
|
hlist_for_each_entry_safe(a,
- b,
c, d, e) S
|
hlist_for_each_entry_continue_rcu(a,
- b,
c) S
|
nr_neigh_for_each(a,
- b,
c) S
|
nr_neigh_for_each_safe(a,
- b,
c, d) S
|
nr_node_for_each(a,
- b,
c) S
|
nr_node_for_each_safe(a,
- b,
c, d) S
|
- for_each_gfn_sp(a, c, d, b) S
+ for_each_gfn_sp(a, c, d) S
|
- for_each_gfn_indirect_valid_sp(a, c, d, b) S
+ for_each_gfn_indirect_valid_sp(a, c, d) S
|
for_each_host(a,
- b,
c) S
|
for_each_host_safe(a,
- b,
c, d) S
|
for_each_mesh_entry(a,
- b,
c, d) S
)
...+>
[akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c]
[akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: fix warnings]
[akpm@linux-foudnation.org: redo intrusive kvm changes]
Tested-by: Peter Senna Tschudin <peter.senna@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2013-02-28 09:06:00 +08:00
|
|
|
hlist_for_each_entry_rcu(vlan, &port->vlan_hash[i], hlist) {
|
2009-11-26 14:07:10 +08:00
|
|
|
if (vlan->dev == src || !(vlan->mode & mode))
|
|
|
|
continue;
|
|
|
|
|
2013-02-08 00:41:02 +08:00
|
|
|
hash = mc_hash(vlan, eth->h_dest);
|
2013-02-06 04:22:50 +08:00
|
|
|
if (!test_bit(hash, vlan->mc_filter))
|
|
|
|
continue;
|
2013-09-07 10:27:11 +08:00
|
|
|
|
|
|
|
err = NET_RX_DROP;
|
2007-07-15 09:55:06 +08:00
|
|
|
nskb = skb_clone(skb, GFP_ATOMIC);
|
2013-09-07 10:27:11 +08:00
|
|
|
if (likely(nskb))
|
|
|
|
err = macvlan_broadcast_one(
|
|
|
|
nskb, vlan, eth,
|
2014-04-17 13:45:59 +08:00
|
|
|
mode == MACVLAN_MODE_BRIDGE) ?:
|
|
|
|
netif_rx_ni(nskb);
|
2009-11-26 14:07:09 +08:00
|
|
|
macvlan_count_rx(vlan, skb->len + ETH_HLEN,
|
2014-10-10 11:13:27 +08:00
|
|
|
err == NET_RX_SUCCESS, true);
|
2007-07-15 09:55:06 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-04-17 13:45:59 +08:00
|
|
|
static void macvlan_process_broadcast(struct work_struct *w)
|
2007-07-15 09:55:06 +08:00
|
|
|
{
|
2014-04-17 13:45:59 +08:00
|
|
|
struct macvlan_port *port = container_of(w, struct macvlan_port,
|
|
|
|
bc_work);
|
|
|
|
struct sk_buff *skb;
|
|
|
|
struct sk_buff_head list;
|
|
|
|
|
2014-10-23 10:43:46 +08:00
|
|
|
__skb_queue_head_init(&list);
|
2014-04-17 13:45:59 +08:00
|
|
|
|
|
|
|
spin_lock_bh(&port->bc_queue.lock);
|
|
|
|
skb_queue_splice_tail_init(&port->bc_queue, &list);
|
|
|
|
spin_unlock_bh(&port->bc_queue.lock);
|
|
|
|
|
|
|
|
while ((skb = __skb_dequeue(&list))) {
|
|
|
|
const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
2007-07-15 09:55:06 +08:00
|
|
|
|
2009-11-26 14:07:10 +08:00
|
|
|
if (!src)
|
|
|
|
/* frame comes from an external address */
|
|
|
|
macvlan_broadcast(skb, port, NULL,
|
|
|
|
MACVLAN_MODE_PRIVATE |
|
|
|
|
MACVLAN_MODE_VEPA |
|
2010-10-28 21:10:50 +08:00
|
|
|
MACVLAN_MODE_PASSTHRU|
|
2009-11-26 14:07:10 +08:00
|
|
|
MACVLAN_MODE_BRIDGE);
|
|
|
|
else if (src->mode == MACVLAN_MODE_VEPA)
|
|
|
|
/* flood to everyone except source */
|
|
|
|
macvlan_broadcast(skb, port, src->dev,
|
|
|
|
MACVLAN_MODE_VEPA |
|
|
|
|
MACVLAN_MODE_BRIDGE);
|
2014-04-17 13:45:59 +08:00
|
|
|
else
|
2009-11-26 14:07:10 +08:00
|
|
|
/*
|
|
|
|
* flood only to VEPA ports, bridge ports
|
|
|
|
* already saw the frame on the way out.
|
|
|
|
*/
|
|
|
|
macvlan_broadcast(skb, port, src->dev,
|
|
|
|
MACVLAN_MODE_VEPA);
|
2014-04-17 13:45:59 +08:00
|
|
|
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
2016-06-01 11:43:00 +08:00
|
|
|
if (src)
|
|
|
|
dev_put(src->dev);
|
2019-01-17 23:30:03 +08:00
|
|
|
consume_skb(skb);
|
2014-04-17 13:45:59 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void macvlan_broadcast_enqueue(struct macvlan_port *port,
|
2016-06-01 11:43:00 +08:00
|
|
|
const struct macvlan_dev *src,
|
2014-04-17 13:45:59 +08:00
|
|
|
struct sk_buff *skb)
|
|
|
|
{
|
2014-04-22 17:15:34 +08:00
|
|
|
struct sk_buff *nskb;
|
2014-04-17 13:45:59 +08:00
|
|
|
int err = -ENOMEM;
|
|
|
|
|
2014-04-22 17:15:34 +08:00
|
|
|
nskb = skb_clone(skb, GFP_ATOMIC);
|
|
|
|
if (!nskb)
|
2014-04-17 13:45:59 +08:00
|
|
|
goto err;
|
|
|
|
|
2016-06-01 11:43:00 +08:00
|
|
|
MACVLAN_SKB_CB(nskb)->src = src;
|
|
|
|
|
2014-04-17 13:45:59 +08:00
|
|
|
spin_lock(&port->bc_queue.lock);
|
2014-09-17 16:08:08 +08:00
|
|
|
if (skb_queue_len(&port->bc_queue) < MACVLAN_BC_QUEUE_LEN) {
|
2016-06-01 11:43:00 +08:00
|
|
|
if (src)
|
|
|
|
dev_hold(src->dev);
|
2014-04-22 17:15:34 +08:00
|
|
|
__skb_queue_tail(&port->bc_queue, nskb);
|
2014-04-17 13:45:59 +08:00
|
|
|
err = 0;
|
|
|
|
}
|
|
|
|
spin_unlock(&port->bc_queue.lock);
|
|
|
|
|
|
|
|
if (err)
|
2014-04-22 17:15:34 +08:00
|
|
|
goto free_nskb;
|
2014-04-17 13:45:59 +08:00
|
|
|
|
|
|
|
schedule_work(&port->bc_work);
|
|
|
|
return;
|
|
|
|
|
2014-04-22 17:15:34 +08:00
|
|
|
free_nskb:
|
|
|
|
kfree_skb(nskb);
|
2014-04-17 13:45:59 +08:00
|
|
|
err:
|
|
|
|
atomic_long_inc(&skb->dev->rx_dropped);
|
|
|
|
}
|
|
|
|
|
2014-09-25 22:31:08 +08:00
|
|
|
static void macvlan_flush_sources(struct macvlan_port *port,
|
|
|
|
struct macvlan_dev *vlan)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
|
|
|
|
struct hlist_node *h, *n;
|
|
|
|
|
|
|
|
hlist_for_each_safe(h, n, &port->vlan_source_hash[i]) {
|
|
|
|
struct macvlan_source_entry *entry;
|
|
|
|
|
|
|
|
entry = hlist_entry(h, struct macvlan_source_entry,
|
|
|
|
hlist);
|
|
|
|
if (entry->vlan == vlan)
|
|
|
|
macvlan_hash_del_source(entry);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
vlan->macaddr_count = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void macvlan_forward_source_one(struct sk_buff *skb,
|
|
|
|
struct macvlan_dev *vlan)
|
|
|
|
{
|
|
|
|
struct sk_buff *nskb;
|
|
|
|
struct net_device *dev;
|
|
|
|
int len;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
dev = vlan->dev;
|
|
|
|
if (unlikely(!(dev->flags & IFF_UP)))
|
|
|
|
return;
|
|
|
|
|
|
|
|
nskb = skb_clone(skb, GFP_ATOMIC);
|
|
|
|
if (!nskb)
|
|
|
|
return;
|
|
|
|
|
|
|
|
len = nskb->len + ETH_HLEN;
|
|
|
|
nskb->dev = dev;
|
2017-10-14 04:40:31 +08:00
|
|
|
|
|
|
|
if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, dev->dev_addr))
|
|
|
|
nskb->pkt_type = PACKET_HOST;
|
2014-09-25 22:31:08 +08:00
|
|
|
|
|
|
|
ret = netif_rx(nskb);
|
2014-10-10 11:13:27 +08:00
|
|
|
macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false);
|
2014-09-25 22:31:08 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void macvlan_forward_source(struct sk_buff *skb,
|
|
|
|
struct macvlan_port *port,
|
|
|
|
const unsigned char *addr)
|
|
|
|
{
|
|
|
|
struct macvlan_source_entry *entry;
|
|
|
|
u32 idx = macvlan_eth_hash(addr);
|
|
|
|
struct hlist_head *h = &port->vlan_source_hash[idx];
|
|
|
|
|
|
|
|
hlist_for_each_entry_rcu(entry, h, hlist) {
|
|
|
|
if (ether_addr_equal_64bits(entry->addr, addr))
|
2016-11-21 08:26:38 +08:00
|
|
|
macvlan_forward_source_one(skb, entry->vlan);
|
2014-09-25 22:31:08 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-04-17 13:45:59 +08:00
|
|
|
/* called under rcu_read_lock() from netif_receive_skb */
|
|
|
|
static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
|
|
|
|
{
|
|
|
|
struct macvlan_port *port;
|
|
|
|
struct sk_buff *skb = *pskb;
|
|
|
|
const struct ethhdr *eth = eth_hdr(skb);
|
|
|
|
const struct macvlan_dev *vlan;
|
|
|
|
const struct macvlan_dev *src;
|
|
|
|
struct net_device *dev;
|
|
|
|
unsigned int len = 0;
|
macvlan: optimize the receive path
The netif_rx() call on the fast path of macvlan_handle_frame() appears to
be there to ensure that we properly throttle incoming packets. However, it
would appear as though the proper throttling is already in place for all
possible ingress paths, and that the call is redundant. If packets are arriving
from the physical NIC, we've already throttled them by this point. Otherwise,
if they are coming via macvlan_queue_xmit(), it calls either
'dev_forward_skb()', which ends up calling netif_rx_internal(), or else in
the broadcast case, we are throttling via macvlan_broadcast_enqueue().
The test results below are from off the box to an lxc instance running macvlan.
Once the tranactions/sec stop increasing, the cpu idle time has gone to 0.
Results are from a quad core Intel E3-1270 V2@3.50GHz box with bnx2x 10G card.
for i in {10,100,200,300,400,500};
do super_netperf $i -H $ip -t TCP_RR; done
Average of 5 runs.
trans/sec trans/sec
(3.17-rc7-net-next) (3.17-rc7-net-next + this patch)
---------- ----------
208101 211534 (+1.6%)
839493 850162 (+1.3%)
845071 844053 (-.12%)
816330 819623 (+.4%)
778700 789938 (+1.4%)
735984 754408 (+2.5%)
Signed-off-by: Jason Baron <jbaron@akamai.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2014-10-10 11:13:31 +08:00
|
|
|
int ret;
|
|
|
|
rx_handler_result_t handle_res;
|
2014-04-17 13:45:59 +08:00
|
|
|
|
|
|
|
port = macvlan_port_get_rcu(skb->dev);
|
|
|
|
if (is_multicast_ether_addr(eth->h_dest)) {
|
2016-06-01 11:45:44 +08:00
|
|
|
unsigned int hash;
|
|
|
|
|
2015-10-10 02:44:54 +08:00
|
|
|
skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN);
|
2014-04-17 13:45:59 +08:00
|
|
|
if (!skb)
|
|
|
|
return RX_HANDLER_CONSUMED;
|
2015-11-17 05:54:20 +08:00
|
|
|
*pskb = skb;
|
2014-04-17 13:45:59 +08:00
|
|
|
eth = eth_hdr(skb);
|
2014-09-25 22:31:08 +08:00
|
|
|
macvlan_forward_source(skb, port, eth->h_source);
|
2014-04-17 13:45:59 +08:00
|
|
|
src = macvlan_hash_lookup(port, eth->h_source);
|
|
|
|
if (src && src->mode != MACVLAN_MODE_VEPA &&
|
|
|
|
src->mode != MACVLAN_MODE_BRIDGE) {
|
2011-11-02 20:11:53 +08:00
|
|
|
/* forward to original port. */
|
|
|
|
vlan = src;
|
2014-04-17 13:45:59 +08:00
|
|
|
ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?:
|
|
|
|
netif_rx(skb);
|
macvlan: optimize the receive path
The netif_rx() call on the fast path of macvlan_handle_frame() appears to
be there to ensure that we properly throttle incoming packets. However, it
would appear as though the proper throttling is already in place for all
possible ingress paths, and that the call is redundant. If packets are arriving
from the physical NIC, we've already throttled them by this point. Otherwise,
if they are coming via macvlan_queue_xmit(), it calls either
'dev_forward_skb()', which ends up calling netif_rx_internal(), or else in
the broadcast case, we are throttling via macvlan_broadcast_enqueue().
The test results below are from off the box to an lxc instance running macvlan.
Once the tranactions/sec stop increasing, the cpu idle time has gone to 0.
Results are from a quad core Intel E3-1270 V2@3.50GHz box with bnx2x 10G card.
for i in {10,100,200,300,400,500};
do super_netperf $i -H $ip -t TCP_RR; done
Average of 5 runs.
trans/sec trans/sec
(3.17-rc7-net-next) (3.17-rc7-net-next + this patch)
---------- ----------
208101 211534 (+1.6%)
839493 850162 (+1.3%)
845071 844053 (-.12%)
816330 819623 (+.4%)
778700 789938 (+1.4%)
735984 754408 (+2.5%)
Signed-off-by: Jason Baron <jbaron@akamai.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2014-10-10 11:13:31 +08:00
|
|
|
handle_res = RX_HANDLER_CONSUMED;
|
2011-11-02 20:11:53 +08:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2016-06-01 11:45:44 +08:00
|
|
|
hash = mc_hash(NULL, eth->h_dest);
|
|
|
|
if (test_bit(hash, port->mc_filter))
|
|
|
|
macvlan_broadcast_enqueue(port, src, skb);
|
2014-04-17 13:45:59 +08:00
|
|
|
|
2011-03-12 11:14:39 +08:00
|
|
|
return RX_HANDLER_PASS;
|
2007-07-15 09:55:06 +08:00
|
|
|
}
|
|
|
|
|
2014-09-25 22:31:08 +08:00
|
|
|
macvlan_forward_source(skb, port, eth->h_source);
|
2017-06-21 19:59:18 +08:00
|
|
|
if (macvlan_passthru(port))
|
2013-05-09 12:23:40 +08:00
|
|
|
vlan = list_first_or_null_rcu(&port->vlans,
|
|
|
|
struct macvlan_dev, list);
|
2010-10-28 21:10:50 +08:00
|
|
|
else
|
|
|
|
vlan = macvlan_hash_lookup(port, eth->h_dest);
|
2017-10-14 04:40:24 +08:00
|
|
|
if (!vlan || vlan->mode == MACVLAN_MODE_SOURCE)
|
2011-03-12 11:14:39 +08:00
|
|
|
return RX_HANDLER_PASS;
|
2007-07-15 09:55:06 +08:00
|
|
|
|
|
|
|
dev = vlan->dev;
|
|
|
|
if (unlikely(!(dev->flags & IFF_UP))) {
|
|
|
|
kfree_skb(skb);
|
2011-03-12 11:14:39 +08:00
|
|
|
return RX_HANDLER_CONSUMED;
|
2007-07-15 09:55:06 +08:00
|
|
|
}
|
2009-11-26 14:07:09 +08:00
|
|
|
len = skb->len + ETH_HLEN;
|
2007-07-15 09:55:06 +08:00
|
|
|
skb = skb_share_check(skb, GFP_ATOMIC);
|
macvlan: optimize the receive path
The netif_rx() call on the fast path of macvlan_handle_frame() appears to
be there to ensure that we properly throttle incoming packets. However, it
would appear as though the proper throttling is already in place for all
possible ingress paths, and that the call is redundant. If packets are arriving
from the physical NIC, we've already throttled them by this point. Otherwise,
if they are coming via macvlan_queue_xmit(), it calls either
'dev_forward_skb()', which ends up calling netif_rx_internal(), or else in
the broadcast case, we are throttling via macvlan_broadcast_enqueue().
The test results below are from off the box to an lxc instance running macvlan.
Once the tranactions/sec stop increasing, the cpu idle time has gone to 0.
Results are from a quad core Intel E3-1270 V2@3.50GHz box with bnx2x 10G card.
for i in {10,100,200,300,400,500};
do super_netperf $i -H $ip -t TCP_RR; done
Average of 5 runs.
trans/sec trans/sec
(3.17-rc7-net-next) (3.17-rc7-net-next + this patch)
---------- ----------
208101 211534 (+1.6%)
839493 850162 (+1.3%)
845071 844053 (-.12%)
816330 819623 (+.4%)
778700 789938 (+1.4%)
735984 754408 (+2.5%)
Signed-off-by: Jason Baron <jbaron@akamai.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2014-10-10 11:13:31 +08:00
|
|
|
if (!skb) {
|
|
|
|
ret = NET_RX_DROP;
|
|
|
|
handle_res = RX_HANDLER_CONSUMED;
|
2010-07-27 17:10:07 +08:00
|
|
|
goto out;
|
macvlan: optimize the receive path
The netif_rx() call on the fast path of macvlan_handle_frame() appears to
be there to ensure that we properly throttle incoming packets. However, it
would appear as though the proper throttling is already in place for all
possible ingress paths, and that the call is redundant. If packets are arriving
from the physical NIC, we've already throttled them by this point. Otherwise,
if they are coming via macvlan_queue_xmit(), it calls either
'dev_forward_skb()', which ends up calling netif_rx_internal(), or else in
the broadcast case, we are throttling via macvlan_broadcast_enqueue().
The test results below are from off the box to an lxc instance running macvlan.
Once the tranactions/sec stop increasing, the cpu idle time has gone to 0.
Results are from a quad core Intel E3-1270 V2@3.50GHz box with bnx2x 10G card.
for i in {10,100,200,300,400,500};
do super_netperf $i -H $ip -t TCP_RR; done
Average of 5 runs.
trans/sec trans/sec
(3.17-rc7-net-next) (3.17-rc7-net-next + this patch)
---------- ----------
208101 211534 (+1.6%)
839493 850162 (+1.3%)
845071 844053 (-.12%)
816330 819623 (+.4%)
778700 789938 (+1.4%)
735984 754408 (+2.5%)
Signed-off-by: Jason Baron <jbaron@akamai.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2014-10-10 11:13:31 +08:00
|
|
|
}
|
2007-07-15 09:55:06 +08:00
|
|
|
|
2015-11-17 05:54:20 +08:00
|
|
|
*pskb = skb;
|
2007-07-15 09:55:06 +08:00
|
|
|
skb->dev = dev;
|
|
|
|
skb->pkt_type = PACKET_HOST;
|
|
|
|
|
macvlan: optimize the receive path
The netif_rx() call on the fast path of macvlan_handle_frame() appears to
be there to ensure that we properly throttle incoming packets. However, it
would appear as though the proper throttling is already in place for all
possible ingress paths, and that the call is redundant. If packets are arriving
from the physical NIC, we've already throttled them by this point. Otherwise,
if they are coming via macvlan_queue_xmit(), it calls either
'dev_forward_skb()', which ends up calling netif_rx_internal(), or else in
the broadcast case, we are throttling via macvlan_broadcast_enqueue().
The test results below are from off the box to an lxc instance running macvlan.
Once the tranactions/sec stop increasing, the cpu idle time has gone to 0.
Results are from a quad core Intel E3-1270 V2@3.50GHz box with bnx2x 10G card.
for i in {10,100,200,300,400,500};
do super_netperf $i -H $ip -t TCP_RR; done
Average of 5 runs.
trans/sec trans/sec
(3.17-rc7-net-next) (3.17-rc7-net-next + this patch)
---------- ----------
208101 211534 (+1.6%)
839493 850162 (+1.3%)
845071 844053 (-.12%)
816330 819623 (+.4%)
778700 789938 (+1.4%)
735984 754408 (+2.5%)
Signed-off-by: Jason Baron <jbaron@akamai.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2014-10-10 11:13:31 +08:00
|
|
|
ret = NET_RX_SUCCESS;
|
|
|
|
handle_res = RX_HANDLER_ANOTHER;
|
2010-07-27 17:10:07 +08:00
|
|
|
out:
|
2014-10-10 11:13:27 +08:00
|
|
|
macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false);
|
macvlan: optimize the receive path
The netif_rx() call on the fast path of macvlan_handle_frame() appears to
be there to ensure that we properly throttle incoming packets. However, it
would appear as though the proper throttling is already in place for all
possible ingress paths, and that the call is redundant. If packets are arriving
from the physical NIC, we've already throttled them by this point. Otherwise,
if they are coming via macvlan_queue_xmit(), it calls either
'dev_forward_skb()', which ends up calling netif_rx_internal(), or else in
the broadcast case, we are throttling via macvlan_broadcast_enqueue().
The test results below are from off the box to an lxc instance running macvlan.
Once the tranactions/sec stop increasing, the cpu idle time has gone to 0.
Results are from a quad core Intel E3-1270 V2@3.50GHz box with bnx2x 10G card.
for i in {10,100,200,300,400,500};
do super_netperf $i -H $ip -t TCP_RR; done
Average of 5 runs.
trans/sec trans/sec
(3.17-rc7-net-next) (3.17-rc7-net-next + this patch)
---------- ----------
208101 211534 (+1.6%)
839493 850162 (+1.3%)
845071 844053 (-.12%)
816330 819623 (+.4%)
778700 789938 (+1.4%)
735984 754408 (+2.5%)
Signed-off-by: Jason Baron <jbaron@akamai.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2014-10-10 11:13:31 +08:00
|
|
|
return handle_res;
|
2007-07-15 09:55:06 +08:00
|
|
|
}
|
|
|
|
|
2009-11-26 14:07:10 +08:00
|
|
|
static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
|
|
{
|
|
|
|
const struct macvlan_dev *vlan = netdev_priv(dev);
|
|
|
|
const struct macvlan_port *port = vlan->port;
|
|
|
|
const struct macvlan_dev *dest;
|
|
|
|
|
|
|
|
if (vlan->mode == MACVLAN_MODE_BRIDGE) {
|
|
|
|
const struct ethhdr *eth = (void *)skb->data;
|
|
|
|
|
|
|
|
/* send to other bridge ports directly */
|
|
|
|
if (is_multicast_ether_addr(eth->h_dest)) {
|
|
|
|
macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE);
|
|
|
|
goto xmit_world;
|
|
|
|
}
|
|
|
|
|
|
|
|
dest = macvlan_hash_lookup(port, eth->h_dest);
|
|
|
|
if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
|
2011-05-19 10:53:20 +08:00
|
|
|
/* send to lowerdev first for its network taps */
|
2011-09-18 20:53:20 +08:00
|
|
|
dev_forward_skb(vlan->lowerdev, skb);
|
2009-11-26 14:07:10 +08:00
|
|
|
|
|
|
|
return NET_XMIT_SUCCESS;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
xmit_world:
|
2012-05-11 11:03:34 +08:00
|
|
|
skb->dev = vlan->lowerdev;
|
2018-07-10 00:19:48 +08:00
|
|
|
return dev_queue_xmit_accel(skb,
|
|
|
|
netdev_get_sb_channel(dev) ? dev : NULL);
|
2009-11-26 14:07:10 +08:00
|
|
|
}
|
|
|
|
|
2014-05-30 16:00:56 +08:00
|
|
|
static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
#ifdef CONFIG_NET_POLL_CONTROLLER
|
|
|
|
if (vlan->netpoll)
|
|
|
|
netpoll_send_skb(vlan->netpoll, skb);
|
|
|
|
#else
|
|
|
|
BUG();
|
|
|
|
#endif
|
|
|
|
return NETDEV_TX_OK;
|
|
|
|
}
|
|
|
|
|
2013-12-28 04:06:46 +08:00
|
|
|
static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
|
|
|
|
struct net_device *dev)
|
2007-07-15 09:55:06 +08:00
|
|
|
{
|
2018-04-04 05:16:09 +08:00
|
|
|
struct macvlan_dev *vlan = netdev_priv(dev);
|
2007-07-15 09:55:06 +08:00
|
|
|
unsigned int len = skb->len;
|
|
|
|
int ret;
|
2014-05-30 16:00:56 +08:00
|
|
|
|
|
|
|
if (unlikely(netpoll_tx_running(dev)))
|
|
|
|
return macvlan_netpoll_send_skb(vlan, skb);
|
2007-07-15 09:55:06 +08:00
|
|
|
|
2018-04-04 05:16:09 +08:00
|
|
|
ret = macvlan_queue_xmit(skb, dev);
|
2013-11-07 01:54:46 +08:00
|
|
|
|
2010-05-10 12:51:02 +08:00
|
|
|
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
|
2014-01-04 14:22:34 +08:00
|
|
|
struct vlan_pcpu_stats *pcpu_stats;
|
2009-09-03 08:11:45 +08:00
|
|
|
|
2010-11-11 05:14:04 +08:00
|
|
|
pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
|
|
|
|
u64_stats_update_begin(&pcpu_stats->syncp);
|
|
|
|
pcpu_stats->tx_packets++;
|
|
|
|
pcpu_stats->tx_bytes += len;
|
|
|
|
u64_stats_update_end(&pcpu_stats->syncp);
|
|
|
|
} else {
|
|
|
|
this_cpu_inc(vlan->pcpu_stats->tx_dropped);
|
|
|
|
}
|
2009-11-10 14:14:24 +08:00
|
|
|
return ret;
|
2007-07-15 09:55:06 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
|
2007-10-09 16:40:57 +08:00
|
|
|
unsigned short type, const void *daddr,
|
|
|
|
const void *saddr, unsigned len)
|
2007-07-15 09:55:06 +08:00
|
|
|
{
|
|
|
|
const struct macvlan_dev *vlan = netdev_priv(dev);
|
|
|
|
struct net_device *lowerdev = vlan->lowerdev;
|
|
|
|
|
2007-10-09 16:36:32 +08:00
|
|
|
return dev_hard_header(skb, lowerdev, type, daddr,
|
|
|
|
saddr ? : dev->dev_addr, len);
|
2007-07-15 09:55:06 +08:00
|
|
|
}
|
|
|
|
|
/* Link-layer header operations: header creation goes through
 * macvlan_hard_header() (which targets the lower device); parsing and
 * hardware-header caching use the generic ethernet helpers.
 */
static const struct header_ops macvlan_hard_header_ops = {
	.create		= macvlan_hard_header,
	.parse		= eth_header_parse,
	.cache		= eth_header_cache,
	.cache_update	= eth_header_cache_update,
};
|
|
|
|
|
/* ndo_open: program the lower device so frames for this macvlan are
 * received, then insert the vlan into the port's address hash.
 * Resources acquired along the way are unwound via the goto chain on
 * failure.
 */
static int macvlan_open(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct net_device *lowerdev = vlan->lowerdev;
	int err;

	if (macvlan_passthru(vlan->port)) {
		/* Passthru mode owns the lower device outright: just make it
		 * promiscuous (unless NOPROMISC was requested) and skip all
		 * per-address filtering below.
		 */
		if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) {
			err = dev_set_promiscuity(lowerdev, 1);
			if (err < 0)
				goto out;
		}
		goto hash_add;
	}

	err = -EADDRINUSE;
	if (macvlan_addr_busy(vlan->port, dev->dev_addr))
		goto out;

	/* Attempt to populate accel_priv which is used to offload the L2
	 * forwarding requests for unicast packets.
	 */
	if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD)
		vlan->accel_priv =
		      lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);

	/* If earlier attempt to offload failed, or accel_priv is not
	 * populated we must add the unicast address to the lower device.
	 */
	if (IS_ERR_OR_NULL(vlan->accel_priv)) {
		vlan->accel_priv = NULL;
		err = dev_uc_add(lowerdev, dev->dev_addr);
		if (err < 0)
			goto out;
	}

	/* Mirror our ALLMULTI/PROMISC flags down to the lower device. */
	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(lowerdev, 1);
		if (err < 0)
			goto del_unicast;
	}

	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(lowerdev, 1);
		if (err < 0)
			goto clear_multi;
	}

hash_add:
	macvlan_hash_add(vlan);
	return 0;

clear_multi:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(lowerdev, -1);
del_unicast:
	/* Undo whichever unicast setup path succeeded above. */
	if (vlan->accel_priv) {
		lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
							   vlan->accel_priv);
		vlan->accel_priv = NULL;
	} else {
		dev_uc_del(lowerdev, dev->dev_addr);
	}
out:
	return err;
}
|
|
|
|
|
|
|
|
/* ndo_stop: tear down the lower-device state set up by macvlan_open()
 * (L2 forwarding offload, synced addresses, promisc/allmulti counts)
 * and remove the vlan from the port's address hash.
 */
static int macvlan_stop(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct net_device *lowerdev = vlan->lowerdev;

	/* Release the L2 forwarding offload station, if one was created. */
	if (vlan->accel_priv) {
		lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
							   vlan->accel_priv);
		vlan->accel_priv = NULL;
	}

	dev_uc_unsync(lowerdev, dev);
	dev_mc_unsync(lowerdev, dev);

	if (macvlan_passthru(vlan->port)) {
		/* Passthru only touched promiscuity in open(); undo that. */
		if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
			dev_set_promiscuity(lowerdev, -1);
		goto hash_del;
	}

	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(lowerdev, -1);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(lowerdev, -1);

	dev_uc_del(lowerdev, dev->dev_addr);

hash_del:
	/* Skip the RCU-synchronized hash removal when the device is being
	 * dismantled anyway.
	 */
	macvlan_hash_del(vlan, !dev->dismantle);
	return 0;
}
|
|
|
|
|
/* Change this macvlan's MAC address to @addr, keeping the lower device's
 * unicast filter and the port hash table in sync.  Also invoked from the
 * notifier path when the lower device's address changes in passthru mode.
 */
static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct net_device *lowerdev = vlan->lowerdev;
	struct macvlan_port *port = vlan->port;
	int err;

	if (!(dev->flags & IFF_UP)) {
		/* Device is down: no filters are programmed yet, so just
		 * copy in the new address.
		 */
		ether_addr_copy(dev->dev_addr, addr);
	} else {
		/* Rehash and update the device filters */
		if (macvlan_addr_busy(vlan->port, addr))
			return -EADDRINUSE;

		if (!macvlan_passthru(port)) {
			/* Add the new address before removing the old one so
			 * we never drop frames for either.
			 */
			err = dev_uc_add(lowerdev, addr);
			if (err)
				return err;

			dev_uc_del(lowerdev, dev->dev_addr);
		}

		macvlan_hash_change_addr(vlan, addr);
	}
	if (macvlan_passthru(port) && !macvlan_addr_change(port)) {
		/* Since addr_change isn't set, we are here due to lower
		 * device change.  Save the lower-dev address so we can
		 * restore it later.
		 */
		ether_addr_copy(vlan->port->perm_addr,
				lowerdev->dev_addr);
	}
	macvlan_clear_addr_change(port);
	return 0;
}
|
|
|
|
|
2014-05-30 14:32:49 +08:00
|
|
|
static int macvlan_set_mac_address(struct net_device *dev, void *p)
|
|
|
|
{
|
|
|
|
struct macvlan_dev *vlan = netdev_priv(dev);
|
|
|
|
struct sockaddr *addr = p;
|
|
|
|
|
|
|
|
if (!is_valid_ether_addr(addr->sa_data))
|
|
|
|
return -EADDRNOTAVAIL;
|
|
|
|
|
2017-06-21 19:59:16 +08:00
|
|
|
/* If the addresses are the same, this is a no-op */
|
|
|
|
if (ether_addr_equal(dev->dev_addr, addr->sa_data))
|
|
|
|
return 0;
|
|
|
|
|
2014-05-30 14:32:49 +08:00
|
|
|
if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
|
2017-06-21 19:59:19 +08:00
|
|
|
macvlan_set_addr_change(vlan->port);
|
2018-12-13 19:54:30 +08:00
|
|
|
return dev_set_mac_address(vlan->lowerdev, addr, NULL);
|
2014-05-30 14:32:49 +08:00
|
|
|
}
|
|
|
|
|
2018-12-01 07:26:27 +08:00
|
|
|
if (macvlan_addr_busy(vlan->port, addr->sa_data))
|
|
|
|
return -EADDRINUSE;
|
|
|
|
|
2014-05-30 14:32:49 +08:00
|
|
|
return macvlan_sync_address(dev, addr->sa_data);
|
|
|
|
}
|
|
|
|
|
2007-07-15 09:55:06 +08:00
|
|
|
static void macvlan_change_rx_flags(struct net_device *dev, int change)
|
|
|
|
{
|
|
|
|
struct macvlan_dev *vlan = netdev_priv(dev);
|
|
|
|
struct net_device *lowerdev = vlan->lowerdev;
|
|
|
|
|
2014-05-08 17:15:37 +08:00
|
|
|
if (dev->flags & IFF_UP) {
|
|
|
|
if (change & IFF_ALLMULTI)
|
|
|
|
dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
|
2015-05-02 05:36:37 +08:00
|
|
|
if (change & IFF_PROMISC)
|
|
|
|
dev_set_promiscuity(lowerdev,
|
|
|
|
dev->flags & IFF_PROMISC ? 1 : -1);
|
|
|
|
|
2014-05-08 17:15:37 +08:00
|
|
|
}
|
2007-07-15 09:55:06 +08:00
|
|
|
}
|
|
|
|
|
2016-06-01 11:45:44 +08:00
|
|
|
static void macvlan_compute_filter(unsigned long *mc_filter,
|
|
|
|
struct net_device *dev,
|
|
|
|
struct macvlan_dev *vlan)
|
2007-07-15 09:55:06 +08:00
|
|
|
{
|
2013-02-06 04:22:50 +08:00
|
|
|
if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
|
2016-06-01 11:45:44 +08:00
|
|
|
bitmap_fill(mc_filter, MACVLAN_MC_FILTER_SZ);
|
2013-02-06 04:22:50 +08:00
|
|
|
} else {
|
|
|
|
struct netdev_hw_addr *ha;
|
|
|
|
DECLARE_BITMAP(filter, MACVLAN_MC_FILTER_SZ);
|
|
|
|
|
|
|
|
bitmap_zero(filter, MACVLAN_MC_FILTER_SZ);
|
|
|
|
netdev_for_each_mc_addr(ha, dev) {
|
2013-02-08 00:41:02 +08:00
|
|
|
__set_bit(mc_hash(vlan, ha->addr), filter);
|
2013-02-06 04:22:50 +08:00
|
|
|
}
|
2013-02-08 00:02:57 +08:00
|
|
|
|
2013-02-08 00:41:02 +08:00
|
|
|
__set_bit(mc_hash(vlan, dev->broadcast), filter);
|
2013-02-08 00:02:57 +08:00
|
|
|
|
2016-06-01 11:45:44 +08:00
|
|
|
bitmap_copy(mc_filter, filter, MACVLAN_MC_FILTER_SZ);
|
2013-02-06 04:22:50 +08:00
|
|
|
}
|
2016-06-01 11:45:44 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* ndo_set_rx_mode: refresh this macvlan's multicast filter and sync its
 * uc/mc address lists down to the lower device, then refresh the
 * port-wide filter derived from the lower device's lists.
 */
static void macvlan_set_mac_lists(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	macvlan_compute_filter(vlan->mc_filter, dev, vlan);

	dev_uc_sync(vlan->lowerdev, dev);
	dev_mc_sync(vlan->lowerdev, dev);

	/* This is slightly inaccurate as we're including the subscription
	 * list of vlan->lowerdev too.
	 *
	 * Bug alert: This only works if everyone has the same broadcast
	 * address as lowerdev.  As soon as someone changes theirs this
	 * will break.
	 *
	 * However, this is already broken as when you change your broadcast
	 * address we don't get called.
	 *
	 * The solution is to maintain a list of broadcast addresses like
	 * we do for uc/mc, if you care.
	 */
	macvlan_compute_filter(vlan->port->mc_filter, vlan->lowerdev, NULL);
}
|
|
|
|
|
|
|
|
static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
|
|
|
|
{
|
|
|
|
struct macvlan_dev *vlan = netdev_priv(dev);
|
|
|
|
|
net: use core MTU range checking in core net infra
geneve:
- Merge __geneve_change_mtu back into geneve_change_mtu, set max_mtu
- This one isn't quite as straight-forward as others, could use some
closer inspection and testing
macvlan:
- set min/max_mtu
tun:
- set min/max_mtu, remove tun_net_change_mtu
vxlan:
- Merge __vxlan_change_mtu back into vxlan_change_mtu
- Set max_mtu to IP_MAX_MTU and retain dynamic MTU range checks in
change_mtu function
- This one is also not as straight-forward and could use closer inspection
and testing from vxlan folks
bridge:
- set max_mtu of IP_MAX_MTU and retain dynamic MTU range checks in
change_mtu function
openvswitch:
- set min/max_mtu, remove internal_dev_change_mtu
- note: max_mtu wasn't checked previously, it's been set to 65535, which
is the largest possible size supported
sch_teql:
- set min/max_mtu (note: max_mtu previously unchecked, used max of 65535)
macsec:
- min_mtu = 0, max_mtu = 65535
macvlan:
- min_mtu = 0, max_mtu = 65535
ntb_netdev:
- min_mtu = 0, max_mtu = 65535
veth:
- min_mtu = 68, max_mtu = 65535
8021q:
- min_mtu = 0, max_mtu = 65535
CC: netdev@vger.kernel.org
CC: Nicolas Dichtel <nicolas.dichtel@6wind.com>
CC: Hannes Frederic Sowa <hannes@stressinduktion.org>
CC: Tom Herbert <tom@herbertland.com>
CC: Daniel Borkmann <daniel@iogearbox.net>
CC: Alexander Duyck <alexander.h.duyck@intel.com>
CC: Paolo Abeni <pabeni@redhat.com>
CC: Jiri Benc <jbenc@redhat.com>
CC: WANG Cong <xiyou.wangcong@gmail.com>
CC: Roopa Prabhu <roopa@cumulusnetworks.com>
CC: Pravin B Shelar <pshelar@ovn.org>
CC: Sabrina Dubroca <sd@queasysnail.net>
CC: Patrick McHardy <kaber@trash.net>
CC: Stephen Hemminger <stephen@networkplumber.org>
CC: Pravin Shelar <pshelar@nicira.com>
CC: Maxim Krasnyansky <maxk@qti.qualcomm.com>
Signed-off-by: Jarod Wilson <jarod@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-10-21 01:55:20 +08:00
|
|
|
if (vlan->lowerdev->mtu < new_mtu)
|
2007-07-15 09:55:06 +08:00
|
|
|
return -EINVAL;
|
|
|
|
dev->mtu = new_mtu;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
/* ndo_do_ioctl: forward hardware-timestamping ioctls to the real (lower)
 * device, rewriting the ifreq name so the lower driver sees its own.
 * Results are copied back to the caller's ifreq only on success.
 */
static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct net_device *real_dev = macvlan_dev_real_dev(dev);
	const struct net_device_ops *ops = real_dev->netdev_ops;
	struct ifreq ifrr;
	int err = -EOPNOTSUPP;

	/* Build a private ifreq naming the lower device. */
	strscpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
	ifrr.ifr_ifru = ifr->ifr_ifru;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		/* Setting timestamping config is restricted to init_net. */
		if (!net_eq(dev_net(dev), &init_net))
			break;
		/* fall through */
	case SIOCGHWTSTAMP:
		if (netif_device_present(real_dev) && ops->ndo_do_ioctl)
			err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd);
		break;
	}

	if (!err)
		ifr->ifr_ifru = ifrr.ifr_ifru;

	return err;
}
|
|
|
|
|
/*
 * macvlan network devices have devices nesting below it and are a special
 * "super class" of normal network devices; split their locks off into a
 * separate class since they always nest.
 */
static struct lock_class_key macvlan_netdev_addr_lock_key;

/* Software offloads kept enabled regardless of the lower device. */
#define ALWAYS_ON_OFFLOADS \
	(NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \
	 NETIF_F_GSO_ROBUST | NETIF_F_GSO_ENCAP_ALL)

#define ALWAYS_ON_FEATURES (ALWAYS_ON_OFFLOADS | NETIF_F_LLTX)

/* Feature bits a macvlan may inherit from its lower device. */
#define MACVLAN_FEATURES \
	(NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
	 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_LRO | \
	 NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
	 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)

/* Link-state bits mirrored from the lower device in macvlan_init(). */
#define MACVLAN_STATE_MASK \
	((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
|
|
|
|
|
2014-05-17 05:04:56 +08:00
|
|
|
static int macvlan_get_nest_level(struct net_device *dev)
|
|
|
|
{
|
|
|
|
return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
|
|
|
|
}
|
|
|
|
|
/* Assign macvlan-specific lockdep classes so stacked-device locking
 * (macvlan over macvlan/lowerdev) does not trigger false positives;
 * the addr_list_lock subclass encodes the nesting depth.
 */
static void macvlan_set_lockdep_class(struct net_device *dev)
{
	netdev_lockdep_set_classes(dev);
	lockdep_set_class_and_subclass(&dev->addr_list_lock,
				       &macvlan_netdev_addr_lock_key,
				       macvlan_get_nest_level(dev));
}
|
|
|
|
|
/* ndo_init: derive this device's state/feature/geometry fields from the
 * lower device, set up lockdep classes, allocate per-CPU stats and take
 * a reference on the port.
 */
static int macvlan_init(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	const struct net_device *lowerdev = vlan->lowerdev;
	struct macvlan_port *port = vlan->port;

	/* Mirror carrier/dormant state from the lower device. */
	dev->state		= (dev->state & ~MACVLAN_STATE_MASK) |
				  (lowerdev->state & MACVLAN_STATE_MASK);
	dev->features		= lowerdev->features & MACVLAN_FEATURES;
	dev->features		|= ALWAYS_ON_FEATURES;
	dev->hw_features	|= NETIF_F_LRO;
	dev->vlan_features	= lowerdev->vlan_features & MACVLAN_FEATURES;
	dev->vlan_features	|= ALWAYS_ON_OFFLOADS;
	dev->hw_enc_features    |= dev->features;
	dev->gso_max_size	= lowerdev->gso_max_size;
	dev->gso_max_segs	= lowerdev->gso_max_segs;
	dev->hard_header_len	= lowerdev->hard_header_len;

	macvlan_set_lockdep_class(dev);

	vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
	if (!vlan->pcpu_stats)
		return -ENOMEM;

	/* Count this vlan against the port; dropped in macvlan_uninit(). */
	port->count += 1;

	return 0;
}
|
|
|
|
|
/* ndo_uninit: release per-CPU stats and source-mode entries, drop the
 * port reference taken in macvlan_init() and destroy the port when the
 * last vlan goes away.
 */
static void macvlan_uninit(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvlan_port *port = vlan->port;

	free_percpu(vlan->pcpu_stats);

	macvlan_flush_sources(port, vlan);
	port->count -= 1;
	if (!port->count)
		macvlan_port_destroy(port->dev);
}
|
|
|
|
|
/* ndo_get_stats64: fold the per-CPU counters into @stats.  The 64-bit
 * counters are read under the u64_stats seqcount retry loop; rx_errors
 * and tx_dropped are plain u32s updated without syncp protection, so
 * they are summed outside the loop.
 */
static void macvlan_dev_get_stats64(struct net_device *dev,
				    struct rtnl_link_stats64 *stats)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (vlan->pcpu_stats) {
		struct vlan_pcpu_stats *p;
		u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
		u32 rx_errors = 0, tx_dropped = 0;
		unsigned int start;
		int i;

		for_each_possible_cpu(i) {
			p = per_cpu_ptr(vlan->pcpu_stats, i);
			do {
				/* retry if a writer raced with us */
				start = u64_stats_fetch_begin_irq(&p->syncp);
				rx_packets	= p->rx_packets;
				rx_bytes	= p->rx_bytes;
				rx_multicast	= p->rx_multicast;
				tx_packets	= p->tx_packets;
				tx_bytes	= p->tx_bytes;
			} while (u64_stats_fetch_retry_irq(&p->syncp, start));

			stats->rx_packets	+= rx_packets;
			stats->rx_bytes		+= rx_bytes;
			stats->multicast	+= rx_multicast;
			stats->tx_packets	+= tx_packets;
			stats->tx_bytes		+= tx_bytes;
			/* rx_errors & tx_dropped are u32, updated
			 * without syncp protection.
			 */
			rx_errors	+= p->rx_errors;
			tx_dropped	+= p->tx_dropped;
		}
		stats->rx_errors	= rx_errors;
		stats->rx_dropped	= rx_errors;
		stats->tx_dropped	= tx_dropped;
	}
}
|
|
|
|
|
2011-12-09 08:52:37 +08:00
|
|
|
static int macvlan_vlan_rx_add_vid(struct net_device *dev,
|
2013-04-19 10:04:28 +08:00
|
|
|
__be16 proto, u16 vid)
|
2011-06-06 12:27:16 +08:00
|
|
|
{
|
|
|
|
struct macvlan_dev *vlan = netdev_priv(dev);
|
|
|
|
struct net_device *lowerdev = vlan->lowerdev;
|
|
|
|
|
2013-04-19 10:04:28 +08:00
|
|
|
return vlan_vid_add(lowerdev, proto, vid);
|
2011-06-06 12:27:16 +08:00
|
|
|
}
|
|
|
|
|
2011-12-09 08:52:37 +08:00
|
|
|
static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
|
2013-04-19 10:04:28 +08:00
|
|
|
__be16 proto, u16 vid)
|
2011-06-06 12:27:16 +08:00
|
|
|
{
|
|
|
|
struct macvlan_dev *vlan = netdev_priv(dev);
|
|
|
|
struct net_device *lowerdev = vlan->lowerdev;
|
|
|
|
|
2013-04-19 10:04:28 +08:00
|
|
|
vlan_vid_del(lowerdev, proto, vid);
|
2011-12-09 08:52:37 +08:00
|
|
|
return 0;
|
2011-06-06 12:27:16 +08:00
|
|
|
}
|
|
|
|
|
2012-10-01 20:32:33 +08:00
|
|
|
static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
|
2012-04-15 14:44:37 +08:00
|
|
|
struct net_device *dev,
|
2014-11-28 21:34:15 +08:00
|
|
|
const unsigned char *addr, u16 vid,
|
2019-01-17 07:06:50 +08:00
|
|
|
u16 flags,
|
|
|
|
struct netlink_ext_ack *extack)
|
2012-04-15 14:44:37 +08:00
|
|
|
{
|
|
|
|
struct macvlan_dev *vlan = netdev_priv(dev);
|
|
|
|
int err = -EINVAL;
|
|
|
|
|
2014-08-16 01:04:59 +08:00
|
|
|
/* Support unicast filter only on passthru devices.
|
|
|
|
* Multicast filter should be allowed on all devices.
|
|
|
|
*/
|
2017-06-21 19:59:18 +08:00
|
|
|
if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr))
|
2012-04-15 14:44:37 +08:00
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
2013-07-19 23:20:08 +08:00
|
|
|
if (flags & NLM_F_REPLACE)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
2012-04-15 14:44:37 +08:00
|
|
|
if (is_unicast_ether_addr(addr))
|
|
|
|
err = dev_uc_add_excl(dev, addr);
|
|
|
|
else if (is_multicast_ether_addr(addr))
|
|
|
|
err = dev_mc_add_excl(dev, addr);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2013-02-13 20:00:18 +08:00
|
|
|
static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
|
2012-04-15 14:44:37 +08:00
|
|
|
struct net_device *dev,
|
2014-11-28 21:34:15 +08:00
|
|
|
const unsigned char *addr, u16 vid)
|
2012-04-15 14:44:37 +08:00
|
|
|
{
|
|
|
|
struct macvlan_dev *vlan = netdev_priv(dev);
|
|
|
|
int err = -EINVAL;
|
|
|
|
|
2014-08-16 01:04:59 +08:00
|
|
|
/* Support unicast filter only on passthru devices.
|
|
|
|
* Multicast filter should be allowed on all devices.
|
|
|
|
*/
|
2017-06-21 19:59:18 +08:00
|
|
|
if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr))
|
2012-04-15 14:44:37 +08:00
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
|
|
|
if (is_unicast_ether_addr(addr))
|
|
|
|
err = dev_uc_del(dev, addr);
|
|
|
|
else if (is_multicast_ether_addr(addr))
|
|
|
|
err = dev_mc_del(dev, addr);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2007-07-15 09:55:06 +08:00
|
|
|
static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
|
|
|
|
struct ethtool_drvinfo *drvinfo)
|
|
|
|
{
|
2013-01-06 08:44:26 +08:00
|
|
|
strlcpy(drvinfo->driver, "macvlan", sizeof(drvinfo->driver));
|
|
|
|
strlcpy(drvinfo->version, "0.1", sizeof(drvinfo->version));
|
2007-07-15 09:55:06 +08:00
|
|
|
}
|
|
|
|
|
2016-02-25 02:58:04 +08:00
|
|
|
static int macvlan_ethtool_get_link_ksettings(struct net_device *dev,
|
|
|
|
struct ethtool_link_ksettings *cmd)
|
2008-10-30 06:31:53 +08:00
|
|
|
{
|
|
|
|
const struct macvlan_dev *vlan = netdev_priv(dev);
|
2011-09-03 11:34:30 +08:00
|
|
|
|
2016-02-25 02:58:04 +08:00
|
|
|
return __ethtool_get_link_ksettings(vlan->lowerdev, cmd);
|
2008-10-30 06:31:53 +08:00
|
|
|
}
|
|
|
|
|
2019-03-20 10:23:33 +08:00
|
|
|
static int macvlan_ethtool_get_ts_info(struct net_device *dev,
|
|
|
|
struct ethtool_ts_info *info)
|
|
|
|
{
|
|
|
|
struct net_device *real_dev = macvlan_dev_real_dev(dev);
|
|
|
|
const struct ethtool_ops *ops = real_dev->ethtool_ops;
|
|
|
|
struct phy_device *phydev = real_dev->phydev;
|
|
|
|
|
|
|
|
if (phydev && phydev->drv && phydev->drv->ts_info) {
|
|
|
|
return phydev->drv->ts_info(phydev, info);
|
|
|
|
} else if (ops->get_ts_info) {
|
|
|
|
return ops->get_ts_info(real_dev, info);
|
|
|
|
} else {
|
|
|
|
info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
|
|
|
|
SOF_TIMESTAMPING_SOFTWARE;
|
|
|
|
info->phc_index = -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
/* ndo_fix_features: constrain the requested feature set against what the
 * user enabled (set_features) and what the lower device supports, while
 * keeping the always-on software offloads.  Mask-ordering here is
 * deliberate; the intermediate values feed netdev_increment_features().
 */
static netdev_features_t macvlan_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	netdev_features_t lowerdev_features = vlan->lowerdev->features;
	netdev_features_t mask;

	features |= NETIF_F_ALL_FOR_ALL;
	/* Only macvlan-controllable bits are limited by set_features. */
	features &= (vlan->set_features | ~MACVLAN_FEATURES);
	mask = features;

	/* LRO is honoured from the request, not inherited from lowerdev. */
	lowerdev_features &= (features | ~NETIF_F_LRO);
	features = netdev_increment_features(lowerdev_features, features, mask);
	features |= ALWAYS_ON_FEATURES;
	features &= (ALWAYS_ON_FEATURES | MACVLAN_FEATURES);

	return features;
}
|
|
|
|
|
#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller: nothing to poll directly; the lower device's
 * controller does the work.  Present so netpoll can attach.
 */
static void macvlan_dev_poll_controller(struct net_device *dev)
{
	return;
}

/* ndo_netpoll_setup: allocate a netpoll instance bound to the lower
 * device and hang it off the macvlan.
 */
static int macvlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct net_device *real_dev = vlan->lowerdev;
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	err = -ENOMEM;
	if (!netpoll)
		goto out;

	err = __netpoll_setup(netpoll, real_dev);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	vlan->netpoll = netpoll;

out:
	return err;
}

/* ndo_netpoll_cleanup: detach and free the netpoll instance, if any. */
static void macvlan_dev_netpoll_cleanup(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct netpoll *netpoll = vlan->netpoll;

	if (!netpoll)
		return;

	/* Clear the pointer before freeing so the TX path stops using it. */
	vlan->netpoll = NULL;

	__netpoll_free(netpoll);
}
#endif	/* CONFIG_NET_POLL_CONTROLLER */
|
|
|
|
|
2015-04-02 23:07:05 +08:00
|
|
|
static int macvlan_dev_get_iflink(const struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct macvlan_dev *vlan = netdev_priv(dev);
|
|
|
|
|
|
|
|
return vlan->lowerdev->ifindex;
|
|
|
|
}
|
|
|
|
|
/* ethtool operations; link state and settings are delegated to the
 * lower device by the handlers above.
 */
static const struct ethtool_ops macvlan_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= macvlan_ethtool_get_link_ksettings,
	.get_drvinfo		= macvlan_ethtool_get_drvinfo,
	.get_ts_info		= macvlan_ethtool_get_ts_info,
};
|
|
|
|
|
2008-11-20 13:51:06 +08:00
|
|
|
/* net_device_ops for macvlan interfaces.  The handlers are defined
 * earlier in this file; several of them forward the request to the
 * lower device (MTU, MAC address, rx mode, VLAN filters, netpoll).
 */
static const struct net_device_ops macvlan_netdev_ops = {
	.ndo_init		= macvlan_init,
	.ndo_uninit		= macvlan_uninit,
	.ndo_open		= macvlan_open,
	.ndo_stop		= macvlan_stop,
	.ndo_start_xmit		= macvlan_start_xmit,
	.ndo_change_mtu		= macvlan_change_mtu,
	.ndo_do_ioctl		= macvlan_do_ioctl,
	.ndo_fix_features	= macvlan_fix_features,
	.ndo_change_rx_flags	= macvlan_change_rx_flags,
	.ndo_set_mac_address	= macvlan_set_mac_address,
	.ndo_set_rx_mode	= macvlan_set_mac_lists,
	.ndo_get_stats64	= macvlan_dev_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= macvlan_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= macvlan_vlan_rx_kill_vid,
	.ndo_fdb_add		= macvlan_fdb_add,
	.ndo_fdb_del		= macvlan_fdb_del,
	.ndo_fdb_dump		= ndo_dflt_fdb_dump,
	.ndo_get_lock_subclass  = macvlan_get_nest_level,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= macvlan_dev_poll_controller,
	.ndo_netpoll_setup	= macvlan_dev_netpoll_setup,
	.ndo_netpoll_cleanup	= macvlan_dev_netpoll_cleanup,
#endif
	.ndo_get_iflink		= macvlan_dev_get_iflink,
	.ndo_features_check	= passthru_features_check,
	.ndo_change_proto_down  = dev_change_proto_down_generic,
};
|
|
|
|
|
2010-07-22 05:44:31 +08:00
|
|
|
/* Common netdev setup shared by macvlan and macvtap: start from a plain
 * Ethernet device, then adjust flags and ops for a stacked device.
 * Exported so macvtap can reuse it.
 */
void macvlan_common_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* MTU range: the device accepts 0..ETH_MAX_MTU here; the real
	 * bound is tightened against the lower device in newlink.
	 */
	dev->min_mtu		= 0;
	dev->max_mtu		= ETH_MAX_MTU;
	/* skbs are cloned/requeued for broadcast, so they must be owned. */
	dev->priv_flags	       &= ~IFF_TX_SKB_SHARING;
	netif_keep_dst(dev);
	/* Unicast filtering is supported (forwarded to the lower device). */
	dev->priv_flags	       |= IFF_UNICAST_FLT;
	dev->netdev_ops		= &macvlan_netdev_ops;
	/* Let unregister_netdevice() call free_netdev() for us. */
	dev->needs_free_netdev	= true;
	dev->header_ops		= &macvlan_hard_header_ops;
	dev->ethtool_ops	= &macvlan_ethtool_ops;
}
EXPORT_SYMBOL_GPL(macvlan_common_setup);
|
|
|
|
|
|
|
|
/* rtnl_link_ops ->setup for the plain "macvlan" kind: common setup plus
 * no qdisc by default (the lower device does the queueing).
 */
static void macvlan_setup(struct net_device *dev)
{
	macvlan_common_setup(dev);
	dev->priv_flags |= IFF_NO_QUEUE;
}
|
|
|
|
|
|
|
|
/* Create the per-lower-device macvlan_port and claim the lower device's
 * rx_handler.  Called for the first macvlan created on a given lower
 * device.  Returns 0 on success or a negative errno.
 */
static int macvlan_port_create(struct net_device *dev)
{
	struct macvlan_port *port;
	unsigned int i;
	int err;

	/* Only plain Ethernet devices can carry macvlans. */
	if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK)
		return -EINVAL;

	/* The rx_handler slot is exclusive (bridge, bond, etc. use it too). */
	if (netdev_is_rx_handler_busy(dev))
		return -EBUSY;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (port == NULL)
		return -ENOMEM;

	port->dev = dev;
	/* Remember the lower device's original MAC so passthru can restore it. */
	ether_addr_copy(port->perm_addr, dev->dev_addr);
	INIT_LIST_HEAD(&port->vlans);
	for (i = 0; i < MACVLAN_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&port->vlan_hash[i]);
	for (i = 0; i < MACVLAN_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&port->vlan_source_hash[i]);

	/* Broadcasts are deferred to a workqueue; see macvlan_process_broadcast. */
	skb_queue_head_init(&port->bc_queue);
	INIT_WORK(&port->bc_work, macvlan_process_broadcast);

	err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
	if (err)
		kfree(port);
	else
		dev->priv_flags |= IFF_MACVLAN_PORT;
	return err;
}
|
|
|
|
|
|
|
|
/* Destroy the per-lower-device macvlan_port: unhook the rx_handler,
 * drain any pending broadcast work, restore the lower device's MAC if
 * passthru changed it, and free the port.  Runs under RTNL.
 */
static void macvlan_port_destroy(struct net_device *dev)
{
	struct macvlan_port *port = macvlan_port_get_rtnl(dev);
	struct sk_buff *skb;

	dev->priv_flags &= ~IFF_MACVLAN_PORT;
	netdev_rx_handler_unregister(dev);

	/* After this point, no packet can schedule bc_work anymore,
	 * but we need to cancel it and purge left skbs if any.
	 */
	cancel_work_sync(&port->bc_work);

	while ((skb = __skb_dequeue(&port->bc_queue))) {
		const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;

		/* Drop the reference taken when the skb was queued. */
		if (src)
			dev_put(src->dev);

		kfree_skb(skb);
	}

	/* If the lower device address has been changed by passthru
	 * macvlan, put it back.
	 */
	if (macvlan_passthru(port) &&
	    !ether_addr_equal(port->dev->dev_addr, port->perm_addr)) {
		struct sockaddr sa;

		sa.sa_family = port->dev->type;
		memcpy(&sa.sa_data, port->perm_addr, port->dev->addr_len);
		dev_set_mac_address(port->dev, &sa, NULL);
	}

	kfree(port);
}
|
|
|
|
|
2017-06-26 05:56:01 +08:00
|
|
|
/* rtnl_link_ops ->validate: sanity-check netlink attributes before a
 * macvlan is created or changed.  Returns 0 if acceptable, -EINVAL or
 * -EADDRNOTAVAIL otherwise.
 */
static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	/* A supplied hardware address must be a valid unicast Ethernet MAC. */
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return 0;

	/* NOPROMISC is the only settable flag. */
	if (data[IFLA_MACVLAN_FLAGS] &&
	    nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC)
		return -EINVAL;

	if (data[IFLA_MACVLAN_MODE]) {
		switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) {
		case MACVLAN_MODE_PRIVATE:
		case MACVLAN_MODE_VEPA:
		case MACVLAN_MODE_BRIDGE:
		case MACVLAN_MODE_PASSTHRU:
		case MACVLAN_MODE_SOURCE:
			break;
		default:
			return -EINVAL;
		}
	}

	if (data[IFLA_MACVLAN_MACADDR_MODE]) {
		switch (nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE])) {
		case MACVLAN_MACADDR_ADD:
		case MACVLAN_MACADDR_DEL:
		case MACVLAN_MACADDR_FLUSH:
		case MACVLAN_MACADDR_SET:
			break;
		default:
			return -EINVAL;
		}
	}

	if (data[IFLA_MACVLAN_MACADDR]) {
		if (nla_len(data[IFLA_MACVLAN_MACADDR]) != ETH_ALEN)
			return -EINVAL;

		if (!is_valid_ether_addr(nla_data(data[IFLA_MACVLAN_MACADDR])))
			return -EADDRNOTAVAIL;
	}

	/* MACADDR_COUNT is read-only (fill_info output), never accepted. */
	if (data[IFLA_MACVLAN_MACADDR_COUNT])
		return -EINVAL;

	return 0;
}
|
|
|
|
|
|
|
|
/**
 * reconfigure list of remote source mac address
 * (only for macvlan devices in source mode)
 * Note regarding alignment: all netlink data is aligned to 4 Byte, which
 * suffices for both ether_addr_copy and ether_addr_equal_64bits usage.
 *
 * @vlan: macvlan device being reconfigured (must be in source mode)
 * @mode: one of MACVLAN_MACADDR_{ADD,DEL,FLUSH,SET}
 * @data: netlink attributes; IFLA_MACVLAN_MACADDR and/or
 *        IFLA_MACVLAN_MACADDR_DATA depending on @mode
 *
 * Returns 0 on success or a negative errno.
 */
static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode,
				      struct nlattr *data[])
{
	char *addr = NULL;
	int ret, rem, len;
	struct nlattr *nla, *head;
	struct macvlan_source_entry *entry;

	if (data[IFLA_MACVLAN_MACADDR])
		addr = nla_data(data[IFLA_MACVLAN_MACADDR]);

	if (mode == MACVLAN_MACADDR_ADD) {
		/* ADD requires a single address attribute. */
		if (!addr)
			return -EINVAL;

		return macvlan_hash_add_source(vlan, addr);

	} else if (mode == MACVLAN_MACADDR_DEL) {
		if (!addr)
			return -EINVAL;

		/* Deleting an unknown address is silently ignored. */
		entry = macvlan_hash_lookup_source(vlan, addr);
		if (entry) {
			macvlan_hash_del_source(entry);
			vlan->macaddr_count--;
		}
	} else if (mode == MACVLAN_MACADDR_FLUSH) {
		macvlan_flush_sources(vlan->port, vlan);
	} else if (mode == MACVLAN_MACADDR_SET) {
		/* SET = flush, then install the single address and/or the
		 * nested list from IFLA_MACVLAN_MACADDR_DATA.
		 */
		macvlan_flush_sources(vlan->port, vlan);

		if (addr) {
			ret = macvlan_hash_add_source(vlan, addr);
			if (ret)
				return ret;
		}

		if (!data || !data[IFLA_MACVLAN_MACADDR_DATA])
			return 0;

		head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]);
		len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]);

		nla_for_each_attr(nla, head, len, rem) {
			/* Skip anything that is not a well-formed MAC attr. */
			if (nla_type(nla) != IFLA_MACVLAN_MACADDR ||
			    nla_len(nla) != ETH_ALEN)
				continue;

			addr = nla_data(nla);
			ret = macvlan_hash_add_source(vlan, addr);
			if (ret)
				return ret;
		}
	} else {
		return -EINVAL;
	}

	return 0;
}
|
|
|
|
|
2010-01-30 20:23:40 +08:00
|
|
|
/* Shared newlink implementation for macvlan and macvtap: resolve the
 * lower device, create the port on first use, initialise the private
 * state from netlink attributes, register the netdevice and link it to
 * the lower device.  On failure the port is destroyed again only if it
 * was created here.  Exported for macvtap.
 */
int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvlan_port *port;
	struct net_device *lowerdev;
	int err;
	int macmode;
	bool create = false;	/* did we create the port in this call? */

	if (!tb[IFLA_LINK])
		return -EINVAL;

	lowerdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
	if (lowerdev == NULL)
		return -ENODEV;

	/* When creating macvlans or macvtaps on top of other macvlans - use
	 * the real device as the lowerdev.
	 */
	if (netif_is_macvlan(lowerdev))
		lowerdev = macvlan_dev_real_dev(lowerdev);

	if (!tb[IFLA_MTU])
		dev->mtu = lowerdev->mtu;
	else if (dev->mtu > lowerdev->mtu)
		return -EINVAL;

	/* MTU range: 68 - lowerdev->max_mtu */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = lowerdev->max_mtu;

	if (!tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	/* First macvlan on this lower device: create the shared port. */
	if (!netif_is_macvlan_port(lowerdev)) {
		err = macvlan_port_create(lowerdev);
		if (err < 0)
			return err;
		create = true;
	}
	port = macvlan_port_get_rtnl(lowerdev);

	/* Only 1 macvlan device can be created in passthru mode */
	if (macvlan_passthru(port)) {
		/* The macvlan port must be not created this time,
		 * still goto destroy_macvlan_port for readability.
		 */
		err = -EINVAL;
		goto destroy_macvlan_port;
	}

	vlan->lowerdev = lowerdev;
	vlan->dev      = dev;
	vlan->port     = port;
	vlan->set_features = MACVLAN_FEATURES;
	vlan->nest_level = dev_get_nest_level(lowerdev) + 1;

	vlan->mode     = MACVLAN_MODE_VEPA;
	if (data && data[IFLA_MACVLAN_MODE])
		vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);

	if (data && data[IFLA_MACVLAN_FLAGS])
		vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);

	if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
		/* Passthru must be the only macvlan on the port. */
		if (port->count) {
			err = -EINVAL;
			goto destroy_macvlan_port;
		}
		macvlan_set_passthru(port);
		eth_hw_addr_inherit(dev, lowerdev);
	}

	/* Source-address filtering only makes sense in source mode. */
	if (data && data[IFLA_MACVLAN_MACADDR_MODE]) {
		if (vlan->mode != MACVLAN_MODE_SOURCE) {
			err = -EINVAL;
			goto destroy_macvlan_port;
		}
		macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]);
		err = macvlan_changelink_sources(vlan, macmode, data);
		if (err)
			goto destroy_macvlan_port;
	}

	err = register_netdevice(dev);
	if (err < 0)
		goto destroy_macvlan_port;

	dev->priv_flags |= IFF_MACVLAN;
	err = netdev_upper_dev_link(lowerdev, dev, extack);
	if (err)
		goto unregister_netdev;

	list_add_tail_rcu(&vlan->list, &port->vlans);
	netif_stacked_transfer_operstate(lowerdev, dev);
	/* Force an operstate update so the new device isn't left UNKNOWN. */
	linkwatch_fire_event(dev);

	return 0;

unregister_netdev:
	/* macvlan_uninit would free the macvlan port */
	unregister_netdevice(dev);
	return err;
destroy_macvlan_port:
	/* the macvlan port may be freed by macvlan_uninit when fail to register.
	 * so we destroy the macvlan port only when it's valid.
	 */
	if (create && macvlan_port_get_rtnl(lowerdev))
		macvlan_port_destroy(port->dev);
	return err;
}
EXPORT_SYMBOL_GPL(macvlan_common_newlink);
|
2007-07-15 09:55:06 +08:00
|
|
|
|
2010-01-30 20:23:40 +08:00
|
|
|
/* rtnl_link_ops ->newlink for the "macvlan" kind: thin wrapper around
 * the implementation shared with macvtap.
 */
static int macvlan_newlink(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	return macvlan_common_newlink(src_net, dev, tb, data, extack);
}
|
|
|
|
|
|
|
|
/* rtnl_link_ops ->dellink, shared with macvtap (hence exported): drop
 * source-mode filter entries, unhook the device from the port's vlan
 * list and queue it for unregistration.  Runs under RTNL.
 */
void macvlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (vlan->mode == MACVLAN_MODE_SOURCE)
		macvlan_flush_sources(vlan->port, vlan);
	list_del_rcu(&vlan->list);
	unregister_netdevice_queue(dev, head);
	netdev_upper_dev_unlink(vlan->lowerdev, dev);
}
EXPORT_SYMBOL_GPL(macvlan_dellink);
|
2007-07-15 09:55:06 +08:00
|
|
|
|
2009-11-26 14:07:11 +08:00
|
|
|
/* rtnl_link_ops ->changelink: update mode, flags and source-mode MAC
 * list on an existing macvlan.  Validation happens before any state is
 * committed so a failure leaves the device unchanged (except that
 * flags are applied before the mode, see below).
 */
static int macvlan_changelink(struct net_device *dev,
			      struct nlattr *tb[], struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	enum macvlan_mode mode;
	bool set_mode = false;
	enum macvlan_macaddr_mode macmode;
	int ret;

	/* Validate mode, but don't set yet: setting flags may fail. */
	if (data && data[IFLA_MACVLAN_MODE]) {
		set_mode = true;
		mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
		/* Passthrough mode can't be set or cleared dynamically */
		if ((mode == MACVLAN_MODE_PASSTHRU) !=
		    (vlan->mode == MACVLAN_MODE_PASSTHRU))
			return -EINVAL;
		/* Leaving source mode discards the source-address list. */
		if (vlan->mode == MACVLAN_MODE_SOURCE &&
		    vlan->mode != mode)
			macvlan_flush_sources(vlan->port, vlan);
	}

	if (data && data[IFLA_MACVLAN_FLAGS]) {
		__u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
		bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC;
		/* In passthru mode, toggling NOPROMISC adjusts the lower
		 * device's promiscuity count accordingly.
		 */
		if (macvlan_passthru(vlan->port) && promisc) {
			int err;

			if (flags & MACVLAN_FLAG_NOPROMISC)
				err = dev_set_promiscuity(vlan->lowerdev, -1);
			else
				err = dev_set_promiscuity(vlan->lowerdev, 1);
			if (err < 0)
				return err;
		}
		vlan->flags = flags;
	}
	if (set_mode)
		vlan->mode = mode;
	if (data && data[IFLA_MACVLAN_MACADDR_MODE]) {
		if (vlan->mode != MACVLAN_MODE_SOURCE)
			return -EINVAL;
		macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]);
		ret = macvlan_changelink_sources(vlan, macmode, data);
		if (ret)
			return ret;
	}
	return 0;
}
|
|
|
|
|
2014-09-25 22:31:08 +08:00
|
|
|
static size_t macvlan_get_size_mac(const struct macvlan_dev *vlan)
|
|
|
|
{
|
|
|
|
if (vlan->macaddr_count == 0)
|
|
|
|
return 0;
|
|
|
|
return nla_total_size(0) /* IFLA_MACVLAN_MACADDR_DATA */
|
|
|
|
+ vlan->macaddr_count * nla_total_size(sizeof(u8) * ETH_ALEN);
|
|
|
|
}
|
|
|
|
|
2009-11-26 14:07:11 +08:00
|
|
|
/* rtnl_link_ops ->get_size: upper bound on the netlink attribute space
 * needed by macvlan_fill_info() for this device.
 */
static size_t macvlan_get_size(const struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	return (0
		+ nla_total_size(4) /* IFLA_MACVLAN_MODE */
		+ nla_total_size(2) /* IFLA_MACVLAN_FLAGS */
		+ nla_total_size(4) /* IFLA_MACVLAN_MACADDR_COUNT */
		+ macvlan_get_size_mac(vlan) /* IFLA_MACVLAN_MACADDR */
		);
}
|
|
|
|
|
2014-09-25 22:31:08 +08:00
|
|
|
/* Emit one IFLA_MACVLAN_MACADDR attribute for each source-mode entry of
 * @vlan found in hash bucket @i.  Returns nonzero on nla_put failure so
 * the caller can abort the dump, 0 otherwise.
 */
static int macvlan_fill_info_macaddr(struct sk_buff *skb,
				     const struct macvlan_dev *vlan,
				     const int i)
{
	struct hlist_head *h = &vlan->port->vlan_source_hash[i];
	struct macvlan_source_entry *entry;

	hlist_for_each_entry_rcu(entry, h, hlist) {
		/* The bucket is shared by all vlans on the port; keep
		 * only entries belonging to this device.
		 */
		if (entry->vlan != vlan)
			continue;
		if (nla_put(skb, IFLA_MACVLAN_MACADDR, ETH_ALEN, entry->addr))
			return 1;
	}
	return 0;
}
|
|
|
|
|
2009-11-26 14:07:11 +08:00
|
|
|
/* rtnl_link_ops ->fill_info: dump mode, flags and (in source mode) the
 * configured source MAC addresses.  Returns 0 or -EMSGSIZE.
 */
static int macvlan_fill_info(struct sk_buff *skb,
			     const struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int i;
	struct nlattr *nest;

	if (nla_put_u32(skb, IFLA_MACVLAN_MODE, vlan->mode))
		goto nla_put_failure;
	if (nla_put_u16(skb, IFLA_MACVLAN_FLAGS, vlan->flags))
		goto nla_put_failure;
	if (nla_put_u32(skb, IFLA_MACVLAN_MACADDR_COUNT, vlan->macaddr_count))
		goto nla_put_failure;
	if (vlan->macaddr_count > 0) {
		nest = nla_nest_start_noflag(skb, IFLA_MACVLAN_MACADDR_DATA);
		if (nest == NULL)
			goto nla_put_failure;

		/* Walk every hash bucket; each may hold entries of ours. */
		for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
			if (macvlan_fill_info_macaddr(skb, vlan, i))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
|
|
|
|
|
|
|
|
static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
|
2012-04-15 14:44:37 +08:00
|
|
|
[IFLA_MACVLAN_MODE] = { .type = NLA_U32 },
|
|
|
|
[IFLA_MACVLAN_FLAGS] = { .type = NLA_U16 },
|
2014-09-25 22:31:08 +08:00
|
|
|
[IFLA_MACVLAN_MACADDR_MODE] = { .type = NLA_U32 },
|
|
|
|
[IFLA_MACVLAN_MACADDR] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
|
|
|
|
[IFLA_MACVLAN_MACADDR_DATA] = { .type = NLA_NESTED },
|
|
|
|
[IFLA_MACVLAN_MACADDR_COUNT] = { .type = NLA_U32 },
|
2009-11-26 14:07:11 +08:00
|
|
|
};
|
|
|
|
|
2010-01-30 20:23:40 +08:00
|
|
|
int macvlan_link_register(struct rtnl_link_ops *ops)
|
|
|
|
{
|
|
|
|
/* common fields */
|
|
|
|
ops->validate = macvlan_validate;
|
|
|
|
ops->maxtype = IFLA_MACVLAN_MAX;
|
|
|
|
ops->policy = macvlan_policy;
|
|
|
|
ops->changelink = macvlan_changelink;
|
|
|
|
ops->get_size = macvlan_get_size;
|
|
|
|
ops->fill_info = macvlan_fill_info;
|
|
|
|
|
|
|
|
return rtnl_link_register(ops);
|
|
|
|
};
|
|
|
|
EXPORT_SYMBOL_GPL(macvlan_link_register);
|
|
|
|
|
2015-01-20 22:15:45 +08:00
|
|
|
/* Report the network namespace of the lower device this macvlan sits on. */
static struct net *macvlan_get_link_net(const struct net_device *dev)
{
	struct net_device *lowerdev = macvlan_dev_real_dev(dev);

	return dev_net(lowerdev);
}
|
|
|
|
|
2010-01-30 20:23:40 +08:00
|
|
|
static struct rtnl_link_ops macvlan_link_ops = {
|
2007-07-15 09:55:06 +08:00
|
|
|
.kind = "macvlan",
|
2010-07-22 05:44:31 +08:00
|
|
|
.setup = macvlan_setup,
|
2007-07-15 09:55:06 +08:00
|
|
|
.newlink = macvlan_newlink,
|
|
|
|
.dellink = macvlan_dellink,
|
2015-01-20 22:15:45 +08:00
|
|
|
.get_link_net = macvlan_get_link_net,
|
2017-02-11 08:03:49 +08:00
|
|
|
.priv_size = sizeof(struct macvlan_dev),
|
2007-07-15 09:55:06 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
static int macvlan_device_event(struct notifier_block *unused,
|
|
|
|
unsigned long event, void *ptr)
|
|
|
|
{
|
2013-05-28 09:30:21 +08:00
|
|
|
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
|
2007-07-15 09:55:06 +08:00
|
|
|
struct macvlan_dev *vlan, *next;
|
|
|
|
struct macvlan_port *port;
|
2011-05-09 07:17:57 +08:00
|
|
|
LIST_HEAD(list_kill);
|
2007-07-15 09:55:06 +08:00
|
|
|
|
2019-01-31 17:48:10 +08:00
|
|
|
if (!netif_is_macvlan_port(dev))
|
2007-07-15 09:55:06 +08:00
|
|
|
return NOTIFY_DONE;
|
|
|
|
|
2013-03-30 18:08:44 +08:00
|
|
|
port = macvlan_port_get_rtnl(dev);
|
2010-06-15 11:27:57 +08:00
|
|
|
|
2007-07-15 09:55:06 +08:00
|
|
|
switch (event) {
|
macvlan: make operstate and carrier more accurate
Currently when a macvlan is being initialized and the lower device is
netif_carrier_ok(), the macvlan device doesn't run through
rfc2863_policy() and is left with UNKNOWN operstate. Fix it by adding an
unconditional linkwatch event for the new macvlan device. Similar fix is
already used by the 8021q device (see register_vlan_dev()). Also fix the
inconsistent state when the lower device has been down and its carrier
was changed (when a device is down NETDEV_CHANGE doesn't get generated).
The second issue can be seen f.e. when we have a macvlan on top of a 8021q
device which has been down and its real device has been changing carrier
states, after setting the 8021q device up, the macvlan device will have
the same carrier state as it was before even though the 8021q can now
have a different state.
Example for case 1:
4: eth2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast
state UP mode DEFAULT group default qlen 1000
$ ip l add l eth2 macvl0 type macvlan
$ ip l set macvl0 up
$ ip l sh macvl0
72: macvl0@eth2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc
noqueue state UNKNOWN mode DEFAULT group default
link/ether f6:0b:54:0a:9d:a3 brd ff:ff:ff:ff:ff:ff
Example for case 2 (order is important):
Prestate: eth2 UP/CARRIER, vlan1 down, vlan1-macvlan down
$ ip l set vlan1-macvlan up
$ ip l sh vlan1-macvlan
71: vlan1-macvlan@vlan1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500
qdisc noqueue state UNKNOWN mode DEFAULT group default
link/ether 4a:b8:44:56:b9:b9 brd ff:ff:ff:ff:ff:ff
[ eth2 loses CARRIER before vlan1 has been UP-ed ]
$ ip l sh eth2
4: eth2: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc pfifo_fast
state DOWN mode DEFAULT group default qlen 1000
link/ether 52:54:00:bf:57:16 brd ff:ff:ff:ff:ff:ff
$ ip l sh vlan1-macvlan
71: vlan1-macvlan@vlan1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500
qdisc noqueue state UNKNOWN mode DEFAULT group default
link/ether 4a:b8:44:56:b9:b9 brd ff:ff:ff:ff:ff:ff
$ ip l set vlan1 up
$ ip l sh vlan1
70: vlan1@eth2: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc
noqueue state LOWERLAYERDOWN mode DEFAULT group default qlen 1000
link/ether 52:54:00:bf:57:16 brd ff:ff:ff:ff:ff:ff
$ ip l sh vlan1-macvlan
71: vlan1-macvlan@vlan1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500
qdisc noqueue state UNKNOWN mode DEFAULT group default
link/ether 4a:b8:44:56:b9:b9 brd ff:ff:ff:ff:ff:ff
vlan1-macvlan is still UP, still has carrier and is still in the same
operstate as before. After the patch in case 1 macvl0 has state UP as it
should and in case 2 vlan1-macvlan has state LOWERLAYERDOWN again as it
should. Note that while the lower macvlan device is down their carrier
and thus operstate can go out of sync but that will be fixed once the
lower device goes up again.
This behaviour seems to have been present since beginning of git history.
Signed-off-by: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-01-28 00:50:43 +08:00
|
|
|
case NETDEV_UP:
|
2018-07-10 08:35:01 +08:00
|
|
|
case NETDEV_DOWN:
|
2007-07-15 09:55:06 +08:00
|
|
|
case NETDEV_CHANGE:
|
|
|
|
list_for_each_entry(vlan, &port->vlans, list)
|
2009-12-04 07:59:22 +08:00
|
|
|
netif_stacked_transfer_operstate(vlan->lowerdev,
|
|
|
|
vlan->dev);
|
2007-07-15 09:55:06 +08:00
|
|
|
break;
|
|
|
|
case NETDEV_FEAT_CHANGE:
|
|
|
|
list_for_each_entry(vlan, &port->vlans, list) {
|
2009-11-24 06:18:53 +08:00
|
|
|
vlan->dev->gso_max_size = dev->gso_max_size;
|
2016-03-17 12:59:49 +08:00
|
|
|
vlan->dev->gso_max_segs = dev->gso_max_segs;
|
2013-12-26 19:17:00 +08:00
|
|
|
netdev_update_features(vlan->dev);
|
2007-07-15 09:55:06 +08:00
|
|
|
}
|
|
|
|
break;
|
2014-05-13 14:39:27 +08:00
|
|
|
case NETDEV_CHANGEMTU:
|
|
|
|
list_for_each_entry(vlan, &port->vlans, list) {
|
|
|
|
if (vlan->dev->mtu <= dev->mtu)
|
|
|
|
continue;
|
|
|
|
dev_set_mtu(vlan->dev, dev->mtu);
|
|
|
|
}
|
2014-05-30 14:32:49 +08:00
|
|
|
break;
|
|
|
|
case NETDEV_CHANGEADDR:
|
2017-06-21 19:59:18 +08:00
|
|
|
if (!macvlan_passthru(port))
|
2014-05-30 14:32:49 +08:00
|
|
|
return NOTIFY_DONE;
|
|
|
|
|
|
|
|
vlan = list_first_entry_or_null(&port->vlans,
|
|
|
|
struct macvlan_dev,
|
|
|
|
list);
|
|
|
|
|
|
|
|
if (macvlan_sync_address(vlan->dev, dev->dev_addr))
|
|
|
|
return NOTIFY_BAD;
|
|
|
|
|
2014-05-13 14:39:27 +08:00
|
|
|
break;
|
2007-07-15 09:55:06 +08:00
|
|
|
case NETDEV_UNREGISTER:
|
2010-09-17 11:22:19 +08:00
|
|
|
/* twiddle thumbs on netns device moves */
|
|
|
|
if (dev->reg_state != NETREG_UNREGISTERING)
|
|
|
|
break;
|
|
|
|
|
2007-07-15 09:55:06 +08:00
|
|
|
list_for_each_entry_safe(vlan, next, &port->vlans, list)
|
2011-05-09 07:17:57 +08:00
|
|
|
vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill);
|
|
|
|
unregister_netdevice_many(&list_kill);
|
2007-07-15 09:55:06 +08:00
|
|
|
break;
|
2010-03-10 18:30:19 +08:00
|
|
|
case NETDEV_PRE_TYPE_CHANGE:
|
|
|
|
/* Forbid underlaying device to change its type. */
|
|
|
|
return NOTIFY_BAD;
|
2014-06-05 04:23:37 +08:00
|
|
|
|
|
|
|
case NETDEV_NOTIFY_PEERS:
|
|
|
|
case NETDEV_BONDING_FAILOVER:
|
|
|
|
case NETDEV_RESEND_IGMP:
|
|
|
|
/* Propagate to all vlans */
|
|
|
|
list_for_each_entry(vlan, &port->vlans, list)
|
|
|
|
call_netdevice_notifiers(event, vlan->dev);
|
2007-07-15 09:55:06 +08:00
|
|
|
}
|
|
|
|
return NOTIFY_DONE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Hooks macvlan_device_event() into the netdevice notifier chain. */
static struct notifier_block macvlan_notifier_block __read_mostly = {
	.notifier_call	= macvlan_device_event,
};
|
|
|
|
|
|
|
|
/* Module init: register the netdevice notifier and the "macvlan" link
 * kind.  Unwinds in reverse order on failure.
 *
 * Fix: the return value of register_netdevice_notifier() was ignored,
 * so a notifier registration failure would leave the module loaded but
 * blind to lower-device events.  Propagate the error instead.
 */
static int __init macvlan_init_module(void)
{
	int err;

	err = register_netdevice_notifier(&macvlan_notifier_block);
	if (err < 0)
		return err;

	err = macvlan_link_register(&macvlan_link_ops);
	if (err < 0)
		goto err1;
	return 0;
err1:
	unregister_netdevice_notifier(&macvlan_notifier_block);
	return err;
}
|
|
|
|
|
|
|
|
/* Module exit: undo macvlan_init_module() in reverse order. */
static void __exit macvlan_cleanup_module(void)
{
	rtnl_link_unregister(&macvlan_link_ops);
	unregister_netdevice_notifier(&macvlan_notifier_block);
}
|
|
|
|
|
|
|
|
module_init(macvlan_init_module);
|
|
|
|
module_exit(macvlan_cleanup_module);
|
|
|
|
|
|
|
|
MODULE_LICENSE("GPL");
|
|
|
|
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
|
|
|
|
MODULE_DESCRIPTION("Driver for MAC address based VLANs");
|
|
|
|
MODULE_ALIAS_RTNL_LINK("macvlan");
|