/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * VLAN		An implementation of 802.1Q VLAN tagging.
 *
 * Authors:	Ben Greear <greearb@candelatech.com>
 */
#ifndef _LINUX_IF_VLAN_H_
#define _LINUX_IF_VLAN_H_

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/bug.h>
#include <uapi/linux/if_vlan.h>

#define VLAN_HLEN	4		/* The additional bytes required by VLAN
					 * (in addition to the Ethernet header)
					 */
#define VLAN_ETH_HLEN	18		/* Total octets in header.	 */
#define VLAN_ETH_ZLEN	64		/* Min. octets in frame sans FCS */

/*
 * According to 802.3ac, the packet can be 4 bytes longer. --Klika Jan
 */
#define VLAN_ETH_DATA_LEN	1500	/* Max. octets in payload	 */
#define VLAN_ETH_FRAME_LEN	1518	/* Max. octets in frame sans FCS */

#define VLAN_MAX_DEPTH	8		/* Max. number of nested VLAN tags parsed */

/**
 *	struct vlan_hdr - vlan header
 *	@h_vlan_TCI: priority and VLAN ID
 *	@h_vlan_encapsulated_proto: packet type ID or len
 */
struct vlan_hdr {
	__be16	h_vlan_TCI;
	__be16	h_vlan_encapsulated_proto;
};

/**
 *	struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr)
 *	@h_dest: destination ethernet address
 *	@h_source: source ethernet address
 *	@h_vlan_proto: ethernet protocol
 *	@h_vlan_TCI: priority and VLAN ID
 *	@h_vlan_encapsulated_proto: packet type ID or len
 */
struct vlan_ethhdr {
	unsigned char	h_dest[ETH_ALEN];
	unsigned char	h_source[ETH_ALEN];
	__be16		h_vlan_proto;
	__be16		h_vlan_TCI;
	__be16		h_vlan_encapsulated_proto;
};

#include <linux/skbuff.h>

static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
{
	return (struct vlan_ethhdr *)skb_mac_header(skb);
}

#define VLAN_PRIO_MASK		0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT		13
#define VLAN_CFI_MASK		0x1000 /* Canonical Format Indicator / Drop Eligible Indicator */
#define VLAN_VID_MASK		0x0fff /* VLAN Identifier */
#define VLAN_N_VID		4096

/* found in socket.c */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));

static inline bool is_vlan_dev(const struct net_device *dev)
{
	return dev->priv_flags & IFF_802_1Q_VLAN;
}

#define skb_vlan_tag_present(__skb)	((__skb)->vlan_present)
#define skb_vlan_tag_get(__skb)		((__skb)->vlan_tci)
#define skb_vlan_tag_get_id(__skb)	((__skb)->vlan_tci & VLAN_VID_MASK)
#define skb_vlan_tag_get_cfi(__skb)	(!!((__skb)->vlan_tci & VLAN_CFI_MASK))
#define skb_vlan_tag_get_prio(__skb)	(((__skb)->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT)
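
/*
 * Illustrative sketch (not part of the upstream header): how the
 * skb_vlan_tag_*() accessors above are typically combined to read an
 * out-of-band (hardware accelerated) tag. The helper name is invented
 * for illustration only.
 */
static inline u16 example_skb_vlan_vid(const struct sk_buff *skb)
{
	/* vlan_tci is only meaningful while vlan_present is set. */
	if (skb_vlan_tag_present(skb))
		return skb_vlan_tag_get_id(skb);	/* VID bits only */
	return 0;
}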

static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev)
{
	ASSERT_RTNL();
	return notifier_to_errno(call_netdevice_notifiers(NETDEV_CVLAN_FILTER_PUSH_INFO, dev));
}

static inline void vlan_drop_rx_ctag_filter_info(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_CVLAN_FILTER_DROP_INFO, dev);
}

static inline int vlan_get_rx_stag_filter_info(struct net_device *dev)
{
	ASSERT_RTNL();
	return notifier_to_errno(call_netdevice_notifiers(NETDEV_SVLAN_FILTER_PUSH_INFO, dev));
}

static inline void vlan_drop_rx_stag_filter_info(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_SVLAN_FILTER_DROP_INFO, dev);
}

/**
 *	struct vlan_pcpu_stats - VLAN percpu rx/tx stats
 *	@rx_packets: number of received packets
 *	@rx_bytes: number of received bytes
 *	@rx_multicast: number of received multicast packets
 *	@tx_packets: number of transmitted packets
 *	@tx_bytes: number of transmitted bytes
 *	@syncp: synchronization point for 64bit counters
 *	@rx_errors: number of rx errors
 *	@tx_dropped: number of tx drops
 */
struct vlan_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			rx_multicast;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			rx_errors;
	u32			tx_dropped;
};

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)

extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
					       __be16 vlan_proto, u16 vlan_id);
extern int vlan_for_each(struct net_device *dev,
			 int (*action)(struct net_device *dev, int vid,
				       void *arg), void *arg);
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);
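
/*
 * Illustrative sketch (not part of the upstream header): the common pattern
 * for mapping a VLAN netdevice back to its lower device and VLAN ID. The
 * helper name is invented for illustration only.
 */
static inline struct net_device *example_vlan_lower_dev(struct net_device *dev,
							u16 *vid)
{
	/* vlan_dev_real_dev()/vlan_dev_vlan_id() must only be called on a
	 * device that really is a VLAN device, hence the is_vlan_dev() check.
	 */
	if (!is_vlan_dev(dev))
		return NULL;

	*vid = vlan_dev_vlan_id(dev);
	return vlan_dev_real_dev(dev);
}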

/**
 *	struct vlan_priority_tci_mapping - vlan egress priority mappings
 *	@priority: skb priority
 *	@vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
 *	@next: pointer to next struct
 */
struct vlan_priority_tci_mapping {
	u32					priority;
	u16					vlan_qos;
	struct vlan_priority_tci_mapping	*next;
};

struct proc_dir_entry;
struct netpoll;

/**
 *	struct vlan_dev_priv - VLAN private device data
 *	@nr_ingress_mappings: number of ingress priority mappings
 *	@ingress_priority_map: ingress priority mappings
 *	@nr_egress_mappings: number of egress priority mappings
 *	@egress_priority_map: hash of egress priority mappings
 *	@vlan_proto: VLAN encapsulation protocol
 *	@vlan_id: VLAN identifier
 *	@flags: device flags
 *	@real_dev: underlying netdevice
 *	@real_dev_addr: address of underlying netdevice
 *	@dent: proc dir entry
 *	@vlan_pcpu_stats: ptr to percpu rx stats
 */
struct vlan_dev_priv {
	unsigned int				nr_ingress_mappings;
	u32					ingress_priority_map[8];
	unsigned int				nr_egress_mappings;
	struct vlan_priority_tci_mapping	*egress_priority_map[16];

	__be16					vlan_proto;
	u16					vlan_id;
	u16					flags;

	struct net_device			*real_dev;
	unsigned char				real_dev_addr[ETH_ALEN];

	struct proc_dir_entry			*dent;
	struct vlan_pcpu_stats __percpu		*vlan_pcpu_stats;
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll				*netpoll;
#endif
};

static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u16
vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio)
{
	struct vlan_priority_tci_mapping *mp;

	smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */

	mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)];
	while (mp) {
		if (mp->priority == skprio) {
			return mp->vlan_qos; /* This should already be shifted
					      * to mask correctly with the
					      * VLAN's TCI */
		}
		mp = mp->next;
	}
	return 0;
}

extern bool vlan_do_receive(struct sk_buff **skb);

extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid);
extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid);

extern int vlan_vids_add_by_dev(struct net_device *dev,
				const struct net_device *by_dev);
extern void vlan_vids_del_by_dev(struct net_device *dev,
				 const struct net_device *by_dev);

extern bool vlan_uses_dev(const struct net_device *dev);

#else
static inline struct net_device *
__vlan_find_dev_deep_rcu(struct net_device *real_dev,
		     __be16 vlan_proto, u16 vlan_id)
{
	return NULL;
}

static inline int
vlan_for_each(struct net_device *dev,
	      int (*action)(struct net_device *dev, int vid, void *arg),
	      void *arg)
{
	return 0;
}

static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	BUG();
	return NULL;
}

static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	BUG();
	return 0;
}

static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
	BUG();
	return 0;
}

static inline u16 vlan_dev_get_egress_qos_mask(struct net_device *dev,
					       u32 skprio)
{
	return 0;
}

static inline bool vlan_do_receive(struct sk_buff **skb)
{
	return false;
}

static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
	return 0;
}

static inline void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
}

static inline int vlan_vids_add_by_dev(struct net_device *dev,
				       const struct net_device *by_dev)
{
	return 0;
}

static inline void vlan_vids_del_by_dev(struct net_device *dev,
					const struct net_device *by_dev)
{
}

static inline bool vlan_uses_dev(const struct net_device *dev)
{
	return false;
}
#endif

/**
 * eth_type_vlan - check for valid vlan ether type.
 * @ethertype: ether type to check
 *
 * Returns true if the ether type is a vlan ether type.
 */
static inline bool eth_type_vlan(__be16 ethertype)
{
	switch (ethertype) {
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}

static inline bool vlan_hw_offload_capable(netdev_features_t features,
					   __be16 proto)
{
	if (proto == htons(ETH_P_8021Q) && features & NETIF_F_HW_VLAN_CTAG_TX)
		return true;

	if (proto == htons(ETH_P_8021AD) && features & NETIF_F_HW_VLAN_STAG_TX)
		return true;

	return false;
}

/**
 * __vlan_insert_inner_tag - inner VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 * @mac_len: MAC header length including outer vlan headers
 *
 * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
 * Returns error if skb_cow_head fails.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
					  __be16 vlan_proto, u16 vlan_tci,
					  unsigned int mac_len)
{
	struct vlan_ethhdr *veth;

	if (skb_cow_head(skb, VLAN_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, VLAN_HLEN);

	/* Move the mac header sans proto to the beginning of the new header. */
	if (likely(mac_len > ETH_TLEN))
		memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN);
	skb->mac_header -= VLAN_HLEN;

	veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN);

	/* first, the ethernet type */
	if (likely(mac_len >= ETH_TLEN)) {
		/* h_vlan_encapsulated_proto should already be populated, and
		 * skb->data has space for h_vlan_proto
		 */
		veth->h_vlan_proto = vlan_proto;
	} else {
		/* h_vlan_encapsulated_proto should not be populated, and
		 * skb->data has no space for h_vlan_proto
		 */
		veth->h_vlan_encapsulated_proto = skb->protocol;
	}

	/* now, the TCI */
	veth->h_vlan_TCI = htons(vlan_tci);

	return 0;
}

/**
 * __vlan_insert_tag - regular VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload
 * Returns error if skb_cow_head fails.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline int __vlan_insert_tag(struct sk_buff *skb,
				    __be16 vlan_proto, u16 vlan_tci)
{
	return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
}

/**
 * vlan_insert_inner_tag - inner VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 * @mac_len: MAC header length including outer vlan headers
 *
 * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
 * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
						    __be16 vlan_proto,
						    u16 vlan_tci,
						    unsigned int mac_len)
{
	int err;

	err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len);
	if (err) {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	return skb;
}

/**
 * vlan_insert_tag - regular VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload
 * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
					      __be16 vlan_proto, u16 vlan_tci)
{
	return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
}

/**
 * vlan_insert_tag_set_proto - regular VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload
 * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 */
static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
							__be16 vlan_proto,
							u16 vlan_tci)
{
	skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
	if (skb)
		skb->protocol = vlan_proto;
	return skb;
}

/**
 * __vlan_hwaccel_clear_tag - clear hardware accelerated VLAN info
 * @skb: skbuff to clear
 *
 * Clears the VLAN information from @skb
 */
static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
{
	skb->vlan_present = 0;
}

/**
 * __vlan_hwaccel_copy_tag - copy hardware accelerated VLAN info from another skb
 * @dst: skbuff to copy to
 * @src: skbuff to copy from
 *
 * Copies VLAN information from @src to @dst (for branchless code)
 */
static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src)
{
	dst->vlan_present = src->vlan_present;
	dst->vlan_proto = src->vlan_proto;
	dst->vlan_tci = src->vlan_tci;
}

/*
 * __vlan_hwaccel_push_inside - pushes vlan tag to the payload
 * @skb: skbuff to tag
 *
 * Pushes the VLAN tag from @skb->vlan_tci inside to the payload.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 */
static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
{
	skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
					skb_vlan_tag_get(skb));
	if (likely(skb))
		__vlan_hwaccel_clear_tag(skb);
	return skb;
}

/**
 * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
 */
static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
					  __be16 vlan_proto, u16 vlan_tci)
{
	skb->vlan_proto = vlan_proto;
	skb->vlan_tci = vlan_tci;
	skb->vlan_present = 1;
}
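
/*
 * Illustrative sketch (not part of the upstream header): a hypothetical
 * transmit helper showing the usual choice between leaving the tag out of
 * band for hardware insertion (__vlan_hwaccel_put_tag) and pushing it into
 * the payload in software (__vlan_hwaccel_push_inside). Assumes skb->dev is
 * already set; the function name is invented for illustration only.
 */
static inline struct sk_buff *example_vlan_tag_for_xmit(struct sk_buff *skb,
							__be16 vlan_proto,
							u16 vlan_tci)
{
	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	if (vlan_hw_offload_capable(skb->dev->features, vlan_proto))
		return skb;	/* NIC inserts the tag from skb->vlan_tci */

	/* No offload: move the tag into the packet data; may return NULL
	 * (in which case the original skb has already been freed).
	 */
	return __vlan_hwaccel_push_inside(skb);
}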

/**
 * __vlan_get_tag - get the VLAN ID that is part of the payload
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if the skb is not of VLAN type
 */
static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;

	if (!eth_type_vlan(veth->h_vlan_proto))
		return -EINVAL;

	*vlan_tci = ntohs(veth->h_vlan_TCI);
	return 0;
}

/**
 * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[]
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if @skb->vlan_tci is not set correctly
 */
static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
					 u16 *vlan_tci)
{
	if (skb_vlan_tag_present(skb)) {
		*vlan_tci = skb_vlan_tag_get(skb);
		return 0;
	} else {
		*vlan_tci = 0;
		return -EINVAL;
	}
}

/**
 * vlan_get_tag - get the VLAN ID from the skb
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if the skb is not VLAN tagged
 */
static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
	if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
		return __vlan_hwaccel_get_tag(skb, vlan_tci);
	} else {
		return __vlan_get_tag(skb, vlan_tci);
	}
}
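
/*
 * Illustrative sketch (not part of the upstream header): extracting the VID
 * from the TCI returned by vlan_get_tag(). Assumes skb->dev is set; the
 * helper name is invented for illustration only.
 */
static inline int example_vlan_get_vid(const struct sk_buff *skb, u16 *vid)
{
	u16 vlan_tci;
	int err;

	err = vlan_get_tag(skb, &vlan_tci);
	if (err)
		return err;	/* not VLAN tagged */

	*vid = vlan_tci & VLAN_VID_MASK;
	/* the priority would be (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT */
	return 0;
}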

/**
 * __vlan_get_protocol - get protocol EtherType.
 * @skb: skbuff to query
 * @type: first vlan protocol
 * @depth: buffer to store length of eth and vlan tags in bytes
 *
 * Returns the EtherType of the packet, regardless of whether it is
 * vlan encapsulated (normal or hardware accelerated) or not.
 */
static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
					 int *depth)
{
	unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;

	/* if type is 802.1Q/AD then the header should already be
	 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
	 * ETH_HLEN otherwise
	 */
	if (eth_type_vlan(type)) {
		if (vlan_depth) {
			if (WARN_ON(vlan_depth < VLAN_HLEN))
				return 0;
			vlan_depth -= VLAN_HLEN;
		} else {
			vlan_depth = ETH_HLEN;
		}
		do {
			struct vlan_hdr vhdr, *vh;

			vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr);
			if (unlikely(!vh || !--parse_depth))
				return 0;

			type = vh->h_vlan_encapsulated_proto;
			vlan_depth += VLAN_HLEN;
		} while (eth_type_vlan(type));
	}

	if (depth)
		*depth = vlan_depth;

	return type;
}

/**
 * vlan_get_protocol - get protocol EtherType.
 * @skb: skbuff to query
 *
 * Returns the EtherType of the packet, regardless of whether it is
 * vlan encapsulated (normal or hardware accelerated) or not.
 */
static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
{
	return __vlan_get_protocol(skb, skb->protocol, NULL);
}

/* A getter for the SKB protocol field which will handle VLAN tags consistently
 * whether VLAN acceleration is enabled or not.
 */
static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan)
{
	if (!skip_vlan)
		/* VLAN acceleration strips the VLAN header from the skb and
		 * moves it to skb->vlan_proto
		 */
		return skb_vlan_tag_present(skb) ? skb->vlan_proto : skb->protocol;

	return vlan_get_protocol(skb);
}
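
/*
 * Illustrative sketch (not part of the upstream header): classifying the
 * encapsulated payload independently of how the VLAN tag is stored. The
 * helper name is invented for illustration only.
 */
static inline bool example_skb_is_ipv4(const struct sk_buff *skb)
{
	/* skip_vlan = true digs through in-band and accelerated VLAN tags
	 * alike, so an IPv4 packet inside a tag is still reported as IPv4.
	 */
	return skb_protocol(skb, true) == htons(ETH_P_IP);
}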

static inline void vlan_set_encap_proto(struct sk_buff *skb,
					struct vlan_hdr *vhdr)
{
	__be16 proto;
	unsigned short *rawp;

	/*
	 * Was a VLAN packet, grab the encapsulated protocol, which the layer
	 * three protocols care about.
	 */

	proto = vhdr->h_vlan_encapsulated_proto;
	if (eth_proto_is_802_3(proto)) {
		skb->protocol = proto;
		return;
	}

	rawp = (unsigned short *)(vhdr + 1);
	if (*rawp == 0xFFFF)
		/*
		 * This is a magic hack to spot IPX packets. Older Novell
		 * breaks the protocol design and runs IPX over 802.3 without
		 * an 802.2 LLC layer. We look for FFFF which isn't a used
		 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
		 * but does for the rest.
		 */
		skb->protocol = htons(ETH_P_802_3);
	else
		/*
		 * Real 802.2 LLC
		 */
		skb->protocol = htons(ETH_P_802_2);
}

/**
 * skb_vlan_tagged - check if skb is vlan tagged.
 * @skb: skbuff to query
 *
 * Returns true if the skb is tagged, regardless of whether it is hardware
 * accelerated or not.
 */
static inline bool skb_vlan_tagged(const struct sk_buff *skb)
{
	if (!skb_vlan_tag_present(skb) &&
	    likely(!eth_type_vlan(skb->protocol)))
		return false;

	return true;
}

/**
 * skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers.
 * @skb: skbuff to query
 *
 * Returns true if the skb is tagged with multiple vlan headers, regardless
 * of whether it is hardware accelerated or not.
 */
static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	if (!skb_vlan_tag_present(skb)) {
		struct vlan_ethhdr *veh;

		if (likely(!eth_type_vlan(protocol)))
			return false;

		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			return false;

		veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	}

	if (!eth_type_vlan(protocol))
		return false;

	return true;
}

/**
 * vlan_features_check - drop unsafe features for skb with multiple tags.
 * @skb: skbuff to query
 * @features: features to be checked
 *
 * Returns features without unsafe ones if the skb has multiple tags.
 */
static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
						    netdev_features_t features)
{
	if (skb_vlan_tagged_multi(skb)) {
		/* In the case of multi-tagged packets, use a direct mask
		 * instead of using netdev_intersect_features(), to make
		 * sure that only devices supporting NETIF_F_HW_CSUM will
		 * have checksum offloading support.
		 */
		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
			    NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_TX;
	}

	return features;
}
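
/*
 * Illustrative sketch (not part of the upstream header): how a driver's
 * .ndo_features_check() implementation might apply vlan_features_check()
 * so that multi-tagged packets only keep offloads that are safe for them.
 * The function name is invented for illustration only.
 */
static inline netdev_features_t example_ndo_features_check(struct sk_buff *skb,
							    struct net_device *dev,
							    netdev_features_t features)
{
	/* Drop unsafe offloads (e.g. protocol-specific checksumming) for
	 * packets carrying more than one VLAN tag.
	 */
	return vlan_features_check(skb, features);
}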

/**
 * compare_vlan_header - Compare two vlan headers
 * @h1: Pointer to vlan header
 * @h2: Pointer to vlan header
 *
 * Compare two vlan headers, returns 0 if equal.
 *
 * Please note that alignment of h1 & h2 are only guaranteed to be 16 bits.
 */
static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
						const struct vlan_hdr *h2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return *(u32 *)h1 ^ *(u32 *)h2;
#else
	return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) |
	       ((__force u32)h1->h_vlan_encapsulated_proto ^
		(__force u32)h2->h_vlan_encapsulated_proto);
#endif
}
#endif /* !(_LINUX_IF_VLAN_H_) */