#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/dsa.h>
#include <net/dst_metadata.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/gre.h>
#include <net/pptp.h>
#include <net/tipc.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/stddef.h>
#include <linux/if_ether.h>
#include <linux/mpls.h>
#include <linux/tcp.h>
#include <net/flow_dissector.h>
#include <scsi/fc/fc_fcoe.h>
#include <uapi/linux/batadv_packet.h>
#include <linux/bpf.h>

static DEFINE_MUTEX(flow_dissector_mutex);

static void dissector_set_key(struct flow_dissector *flow_dissector,
			      enum flow_dissector_key_id key_id)
{
	flow_dissector->used_keys |= (1 << key_id);
}

void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count)
{
	unsigned int i;

	memset(flow_dissector, 0, sizeof(*flow_dissector));

	for (i = 0; i < key_count; i++, key++) {
		/* User should make sure that every key target offset is within
		 * boundaries of unsigned short.
		 */
		BUG_ON(key->offset > USHRT_MAX);
		BUG_ON(dissector_uses_key(flow_dissector,
					  key->key_id));

		dissector_set_key(flow_dissector, key->key_id);
		flow_dissector->offset[key->key_id] = key->offset;
	}

	/* Ensure that the dissector always includes control and basic key.
	 * That way we are able to avoid handling lack of these in fast path.
	 */
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_CONTROL));
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_BASIC));
}
EXPORT_SYMBOL(skb_flow_dissector_init);
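
/* A minimal usage sketch (hypothetical caller-side code, not part of this
 * file): the caller declares a target container and a key list that maps
 * each key id to the offset of the matching member, then initializes the
 * dissector once and reuses it for every packet.
 *
 *	struct my_flow {
 *		struct flow_dissector_key_control control;
 *		struct flow_dissector_key_basic basic;
 *	};
 *	static const struct flow_dissector_key my_keys[] = {
 *		{ .key_id = FLOW_DISSECTOR_KEY_CONTROL,
 *		  .offset = offsetof(struct my_flow, control) },
 *		{ .key_id = FLOW_DISSECTOR_KEY_BASIC,
 *		  .offset = offsetof(struct my_flow, basic) },
 *	};
 *	static struct flow_dissector my_dissector;
 *
 *	skb_flow_dissector_init(&my_dissector, my_keys, ARRAY_SIZE(my_keys));
 */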

int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	struct bpf_prog *attached;
	struct net *net;

	net = current->nsproxy->net_ns;
	mutex_lock(&flow_dissector_mutex);
	attached = rcu_dereference_protected(net->flow_dissector_prog,
					     lockdep_is_held(&flow_dissector_mutex));
	if (attached) {
		/* Only one BPF program can be attached at a time */
		mutex_unlock(&flow_dissector_mutex);
		return -EEXIST;
	}
	rcu_assign_pointer(net->flow_dissector_prog, prog);
	mutex_unlock(&flow_dissector_mutex);
	return 0;
}

int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
{
	struct bpf_prog *attached;
	struct net *net;

	net = current->nsproxy->net_ns;
	mutex_lock(&flow_dissector_mutex);
	attached = rcu_dereference_protected(net->flow_dissector_prog,
					     lockdep_is_held(&flow_dissector_mutex));
	if (!attached) {
		mutex_unlock(&flow_dissector_mutex);
		return -ENOENT;
	}
	bpf_prog_put(attached);
	RCU_INIT_POINTER(net->flow_dissector_prog, NULL);
	mutex_unlock(&flow_dissector_mutex);
	return 0;
}
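
/* The program attached above is per network namespace; __skb_flow_dissect()
 * looks it up under rcu_read_lock() and, when one is present, lets it
 * replace the C dissection loop below entirely.
 */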

/**
 * skb_flow_get_be16 - extract be16 entity
 * @skb: sk_buff to extract from
 * @poff: offset to extract at
 * @data: raw buffer pointer to the packet
 * @hlen: packet header length
 *
 * The function will try to retrieve a be16 entity at
 * offset poff
 */
static __be16 skb_flow_get_be16(const struct sk_buff *skb, int poff,
				void *data, int hlen)
{
	__be16 *u, _u;

	u = __skb_header_pointer(skb, poff, sizeof(_u), data, hlen, &_u);
	if (u)
		return *u;

	return 0;
}

/**
 * __skb_flow_get_ports - extract the upper layer ports and return them
 * @skb: sk_buff to extract the ports from
 * @thoff: transport header offset
 * @ip_proto: protocol for which to get port offset
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve the ports at offset thoff + poff where poff
 * is the protocol port offset returned from proto_ports_offset
 */
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    void *data, int hlen)
{
	int poff = proto_ports_offset(ip_proto);

	if (!data) {
		data = skb->data;
		hlen = skb_headlen(skb);
	}

	if (poff >= 0) {
		__be32 *ports, _ports;

		ports = __skb_header_pointer(skb, thoff + poff,
					     sizeof(_ports), data, hlen, &_ports);
		if (ports)
			return *ports;
	}

	return 0;
}
EXPORT_SYMBOL(__skb_flow_get_ports);
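
/* Note: the returned __be32 carries both ports exactly as they sit on the
 * wire, source port in the first 16 bits and destination port in the second,
 * matching the layout of struct flow_dissector_key_ports.
 */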

static void
skb_flow_dissect_set_enc_addr_type(enum flow_dissector_key_id type,
				   struct flow_dissector *flow_dissector,
				   void *target_container)
{
	struct flow_dissector_key_control *ctrl;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL))
		return;

	ctrl = skb_flow_dissector_target(flow_dissector,
					 FLOW_DISSECTOR_KEY_ENC_CONTROL,
					 target_container);
	ctrl->addr_type = type;
}

void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
			     struct flow_dissector *flow_dissector,
			     void *target_container)
{
	struct ip_tunnel_info *info;
	struct ip_tunnel_key *key;

	/* A quick check to see if there might be something to do. */
	if (!dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_KEYID) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_CONTROL) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_PORTS) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_IP) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_OPTS))
		return;

	info = skb_tunnel_info(skb);
	if (!info)
		return;

	key = &info->key;

	switch (ip_tunnel_info_af(info)) {
	case AF_INET:
		skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						   flow_dissector,
						   target_container);
		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
			struct flow_dissector_key_ipv4_addrs *ipv4;

			ipv4 = skb_flow_dissector_target(flow_dissector,
							 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
							 target_container);
			ipv4->src = key->u.ipv4.src;
			ipv4->dst = key->u.ipv4.dst;
		}
		break;
	case AF_INET6:
		skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						   flow_dissector,
						   target_container);
		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
			struct flow_dissector_key_ipv6_addrs *ipv6;

			ipv6 = skb_flow_dissector_target(flow_dissector,
							 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
							 target_container);
			ipv6->src = key->u.ipv6.src;
			ipv6->dst = key->u.ipv6.dst;
		}
		break;
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *keyid;

		keyid = skb_flow_dissector_target(flow_dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  target_container);
		keyid->keyid = tunnel_id_to_key32(key->tun_id);
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *tp;

		tp = skb_flow_dissector_target(flow_dissector,
					       FLOW_DISSECTOR_KEY_ENC_PORTS,
					       target_container);
		tp->src = key->tp_src;
		tp->dst = key->tp_dst;
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_dissector_key_ip *ip;

		ip = skb_flow_dissector_target(flow_dissector,
					       FLOW_DISSECTOR_KEY_ENC_IP,
					       target_container);
		ip->tos = key->tos;
		ip->ttl = key->ttl;
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
		struct flow_dissector_key_enc_opts *enc_opt;

		enc_opt = skb_flow_dissector_target(flow_dissector,
						    FLOW_DISSECTOR_KEY_ENC_OPTS,
						    target_container);

		if (info->options_len) {
			enc_opt->len = info->options_len;
			ip_tunnel_info_opts_get(enc_opt->data, info);
			enc_opt->dst_opt_type = info->key.tun_flags &
						TUNNEL_OPTIONS_PRESENT;
		}
	}
}
EXPORT_SYMBOL(skb_flow_dissect_tunnel_info);
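
/* An MPLS label stack entry is one 32-bit word: a 20-bit label, 3 traffic
 * class bits, a bottom-of-stack bit and an 8-bit TTL (RFC 3032). The
 * MPLS_LS_* masks and shifts below pick those fields apart after ntohl().
 */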
static enum flow_dissect_ret
__skb_flow_dissect_mpls(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, void *data, int nhoff, int hlen)
{
	struct flow_dissector_key_keyid *key_keyid;
	struct mpls_label *hdr, _hdr[2];
	u32 entry, label;

	if (!dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_MPLS_ENTROPY) &&
	    !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS))
		return FLOW_DISSECT_RET_OUT_GOOD;

	hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
				   hlen, &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	entry = ntohl(hdr[0].entry);
	label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_dissector_key_mpls *key_mpls;

		key_mpls = skb_flow_dissector_target(flow_dissector,
						     FLOW_DISSECTOR_KEY_MPLS,
						     target_container);
		key_mpls->mpls_label = label;
		key_mpls->mpls_ttl = (entry & MPLS_LS_TTL_MASK)
					>> MPLS_LS_TTL_SHIFT;
		key_mpls->mpls_tc = (entry & MPLS_LS_TC_MASK)
					>> MPLS_LS_TC_SHIFT;
		key_mpls->mpls_bos = (entry & MPLS_LS_S_MASK)
					>> MPLS_LS_S_SHIFT;
	}

	if (label == MPLS_LABEL_ENTROPY) {
		key_keyid = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
						      target_container);
		key_keyid->keyid = hdr[1].entry & htonl(MPLS_LS_LABEL_MASK);
	}
	return FLOW_DISSECT_RET_OUT_GOOD;
}

static enum flow_dissect_ret
__skb_flow_dissect_arp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data, int nhoff, int hlen)
{
	struct flow_dissector_key_arp *key_arp;
	struct {
		unsigned char ar_sha[ETH_ALEN];
		unsigned char ar_sip[4];
		unsigned char ar_tha[ETH_ALEN];
		unsigned char ar_tip[4];
	} *arp_eth, _arp_eth;
	const struct arphdr *arp;
	struct arphdr _arp;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP))
		return FLOW_DISSECT_RET_OUT_GOOD;

	arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
				   hlen, &_arp);
	if (!arp)
		return FLOW_DISSECT_RET_OUT_BAD;

	if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_hln != ETH_ALEN ||
	    arp->ar_pln != 4 ||
	    (arp->ar_op != htons(ARPOP_REPLY) &&
	     arp->ar_op != htons(ARPOP_REQUEST)))
		return FLOW_DISSECT_RET_OUT_BAD;

	arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
				       sizeof(_arp_eth), data,
				       hlen, &_arp_eth);
	if (!arp_eth)
		return FLOW_DISSECT_RET_OUT_BAD;

	key_arp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_ARP,
					    target_container);

	memcpy(&key_arp->sip, arp_eth->ar_sip, sizeof(key_arp->sip));
	memcpy(&key_arp->tip, arp_eth->ar_tip, sizeof(key_arp->tip));

	/* Only store the lower byte of the opcode;
	 * this covers ARPOP_REPLY and ARPOP_REQUEST.
	 */
	key_arp->op = ntohs(arp->ar_op) & 0xff;

	ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
	ether_addr_copy(key_arp->tha, arp_eth->ar_tha);

	return FLOW_DISSECT_RET_OUT_GOOD;
}
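
/* GRE parsing below handles the version 0 header layout (RFC 2784/2890,
 * with optional checksum, key and sequence fields) and the version 1 PPTP
 * variant (RFC 2637), which always carries PPP inside.
 */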
static enum flow_dissect_ret
__skb_flow_dissect_gre(const struct sk_buff *skb,
		       struct flow_dissector_key_control *key_control,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data,
		       __be16 *p_proto, int *p_nhoff, int *p_hlen,
		       unsigned int flags)
{
	struct flow_dissector_key_keyid *key_keyid;
	struct gre_base_hdr *hdr, _hdr;
	int offset = 0;
	u16 gre_ver;

	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr),
				   data, *p_hlen, &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	/* Only look inside GRE without routing */
	if (hdr->flags & GRE_ROUTING)
		return FLOW_DISSECT_RET_OUT_GOOD;

	/* Only look inside GRE for version 0 and 1 */
	gre_ver = ntohs(hdr->flags & GRE_VERSION);
	if (gre_ver > 1)
		return FLOW_DISSECT_RET_OUT_GOOD;

	*p_proto = hdr->protocol;
	if (gre_ver) {
		/* Version 1 must be PPTP, and check the flags */
		if (!(*p_proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
			return FLOW_DISSECT_RET_OUT_GOOD;
	}

	offset += sizeof(struct gre_base_hdr);

	if (hdr->flags & GRE_CSUM)
		offset += FIELD_SIZEOF(struct gre_full_hdr, csum) +
			  FIELD_SIZEOF(struct gre_full_hdr, reserved1);

	if (hdr->flags & GRE_KEY) {
		const __be32 *keyid;
		__be32 _keyid;

		keyid = __skb_header_pointer(skb, *p_nhoff + offset,
					     sizeof(_keyid),
					     data, *p_hlen, &_keyid);
		if (!keyid)
			return FLOW_DISSECT_RET_OUT_BAD;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_GRE_KEYID)) {
			key_keyid = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_GRE_KEYID,
							      target_container);
			if (gre_ver == 0)
				key_keyid->keyid = *keyid;
			else
				key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
		}
		offset += FIELD_SIZEOF(struct gre_full_hdr, key);
	}

	if (hdr->flags & GRE_SEQ)
		offset += FIELD_SIZEOF(struct pptp_gre_header, seq);

	if (gre_ver == 0) {
		if (*p_proto == htons(ETH_P_TEB)) {
			const struct ethhdr *eth;
			struct ethhdr _eth;

			eth = __skb_header_pointer(skb, *p_nhoff + offset,
						   sizeof(_eth),
						   data, *p_hlen, &_eth);
			if (!eth)
				return FLOW_DISSECT_RET_OUT_BAD;
			*p_proto = eth->h_proto;
			offset += sizeof(*eth);

			/* Cap headers that we access via pointers at the
			 * end of the Ethernet header as our maximum alignment
			 * at that point is only 2 bytes.
			 */
			if (NET_IP_ALIGN)
				*p_hlen = *p_nhoff + offset;
		}
	} else { /* version 1, must be PPTP */
		u8 _ppp_hdr[PPP_HDRLEN];
		u8 *ppp_hdr;

		if (hdr->flags & GRE_ACK)
			offset += FIELD_SIZEOF(struct pptp_gre_header, ack);

		ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
					       sizeof(_ppp_hdr),
					       data, *p_hlen, _ppp_hdr);
		if (!ppp_hdr)
			return FLOW_DISSECT_RET_OUT_BAD;

		switch (PPP_PROTOCOL(ppp_hdr)) {
		case PPP_IP:
			*p_proto = htons(ETH_P_IP);
			break;
		case PPP_IPV6:
			*p_proto = htons(ETH_P_IPV6);
			break;
		default:
			/* Could probably catch some more like MPLS */
			break;
		}

		offset += PPP_HDRLEN;
	}

	*p_nhoff += offset;
	key_control->flags |= FLOW_DIS_ENCAPSULATION;
	if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
		return FLOW_DISSECT_RET_OUT_GOOD;

	return FLOW_DISSECT_RET_PROTO_AGAIN;
}

/**
 * __skb_flow_dissect_batadv() - dissect batman-adv header
 * @skb: sk_buff with the batman-adv header
 * @key_control: flow dissector's control key
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @p_proto: pointer used to update the protocol to process next
 * @p_nhoff: pointer used to update inner network header offset
 * @hlen: packet header length
 * @flags: any combination of FLOW_DISSECTOR_F_*
 *
 * The dissector tries to parse ETH_P_BATMAN packets. Only
 * &struct batadv_unicast packets are actually processed because they contain
 * an inner ethernet header and are usually followed by an actual network
 * header. This allows the flow dissector to continue processing the packet.
 *
 * Return: FLOW_DISSECT_RET_PROTO_AGAIN when &struct batadv_unicast was found,
 * FLOW_DISSECT_RET_OUT_GOOD when dissector should stop after encapsulation,
 * otherwise FLOW_DISSECT_RET_OUT_BAD
 */
static enum flow_dissect_ret
__skb_flow_dissect_batadv(const struct sk_buff *skb,
			  struct flow_dissector_key_control *key_control,
			  void *data, __be16 *p_proto, int *p_nhoff, int hlen,
			  unsigned int flags)
{
	struct {
		struct batadv_unicast_packet batadv_unicast;
		struct ethhdr eth;
	} *hdr, _hdr;

	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr), data, hlen,
				   &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	if (hdr->batadv_unicast.version != BATADV_COMPAT_VERSION)
		return FLOW_DISSECT_RET_OUT_BAD;

	if (hdr->batadv_unicast.packet_type != BATADV_UNICAST)
		return FLOW_DISSECT_RET_OUT_BAD;

	*p_proto = hdr->eth.h_proto;
	*p_nhoff += sizeof(*hdr);

	key_control->flags |= FLOW_DIS_ENCAPSULATION;
	if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
		return FLOW_DISSECT_RET_OUT_GOOD;

	return FLOW_DISSECT_RET_PROTO_AGAIN;
}

static void
__skb_flow_dissect_tcp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data, int thoff, int hlen)
{
	struct flow_dissector_key_tcp *key_tcp;
	struct tcphdr *th, _th;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_TCP))
		return;

	th = __skb_header_pointer(skb, thoff, sizeof(_th), data, hlen, &_th);
	if (!th)
		return;

	if (unlikely(__tcp_hdrlen(th) < sizeof(_th)))
		return;

	key_tcp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_TCP,
					    target_container);
	/* Grab the first 16 bits of the TCP flags word (data offset,
	 * reserved bits and flags) and mask off the 4-bit data offset,
	 * leaving the 12 flag and reserved bits in network byte order.
	 */
	key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF));
}

static void
__skb_flow_dissect_ipv4(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, void *data, const struct iphdr *iph)
{
	struct flow_dissector_key_ip *key_ip;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
		return;

	key_ip = skb_flow_dissector_target(flow_dissector,
					   FLOW_DISSECTOR_KEY_IP,
					   target_container);
	key_ip->tos = iph->tos;
	key_ip->ttl = iph->ttl;
}

static void
__skb_flow_dissect_ipv6(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, void *data, const struct ipv6hdr *iph)
{
	struct flow_dissector_key_ip *key_ip;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
		return;

	key_ip = skb_flow_dissector_target(flow_dissector,
					   FLOW_DISSECTOR_KEY_IP,
					   target_container);
	key_ip->tos = ipv6_get_dsfield(iph);
	key_ip->ttl = iph->hop_limit;
}

/* Maximum number of protocol headers that can be parsed in
 * __skb_flow_dissect
 */
#define MAX_FLOW_DISSECT_HDRS	15

static bool skb_flow_dissect_allowed(int *num_hdrs)
{
	++*num_hdrs;

	return (*num_hdrs <= MAX_FLOW_DISSECT_HDRS);
}
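
/* The cap above bounds the total parsing work per packet: every PROTO_AGAIN
 * or IPPROTO_AGAIN jump in __skb_flow_dissect() consumes one unit of header
 * budget, so a maliciously deep encapsulation stack cannot keep the
 * dissection loop spinning.
 */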

static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
				     struct flow_dissector *flow_dissector,
				     void *target_container)
{
	struct flow_dissector_key_control *key_control;
	struct flow_dissector_key_basic *key_basic;
	struct flow_dissector_key_addrs *key_addrs;
	struct flow_dissector_key_ports *key_ports;

	key_control = skb_flow_dissector_target(flow_dissector,
						FLOW_DISSECTOR_KEY_CONTROL,
						target_container);
	key_control->thoff = flow_keys->thoff;
	if (flow_keys->is_frag)
		key_control->flags |= FLOW_DIS_IS_FRAGMENT;
	if (flow_keys->is_first_frag)
		key_control->flags |= FLOW_DIS_FIRST_FRAG;
	if (flow_keys->is_encap)
		key_control->flags |= FLOW_DIS_ENCAPSULATION;

	key_basic = skb_flow_dissector_target(flow_dissector,
					      FLOW_DISSECTOR_KEY_BASIC,
					      target_container);
	key_basic->n_proto = flow_keys->n_proto;
	key_basic->ip_proto = flow_keys->ip_proto;

	if (flow_keys->addr_proto == ETH_P_IP &&
	    dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		key_addrs = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						      target_container);
		key_addrs->v4addrs.src = flow_keys->ipv4_src;
		key_addrs->v4addrs.dst = flow_keys->ipv4_dst;
		key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	} else if (flow_keys->addr_proto == ETH_P_IPV6 &&
		   dissector_uses_key(flow_dissector,
				      FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		key_addrs = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						      target_container);
		memcpy(&key_addrs->v6addrs, &flow_keys->ipv6_src,
		       sizeof(key_addrs->v6addrs));
		key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		key_ports = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_PORTS,
						      target_container);
		key_ports->src = flow_keys->sport;
		key_ports->dst = flow_keys->dport;
	}
}

bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
			    const struct sk_buff *skb,
			    struct flow_dissector *flow_dissector,
			    struct bpf_flow_keys *flow_keys)
{
	struct bpf_skb_data_end cb_saved;
	struct bpf_skb_data_end *cb;
	u32 result;

	/* Note that even though the const qualifier is discarded
	 * throughout the execution of the BPF program, all changes (the
	 * control block) are reverted after the BPF program returns.
	 * Therefore, __skb_flow_dissect does not alter the skb.
	 */

	cb = (struct bpf_skb_data_end *)skb->cb;

	/* Save Control Block */
	memcpy(&cb_saved, cb, sizeof(cb_saved));
	memset(cb, 0, sizeof(*cb));

	/* Pass parameters to the BPF program */
	memset(flow_keys, 0, sizeof(*flow_keys));
	cb->qdisc_cb.flow_keys = flow_keys;
	flow_keys->n_proto = skb->protocol;
	flow_keys->nhoff = skb_network_offset(skb);
	flow_keys->thoff = flow_keys->nhoff;

	bpf_compute_data_pointers((struct sk_buff *)skb);
	result = BPF_PROG_RUN(prog, skb);

	/* Restore state */
	memcpy(cb, &cb_saved, sizeof(cb_saved));

	flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff,
				   skb_network_offset(skb), skb->len);
	flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
				   flow_keys->nhoff, skb->len);

	return result == BPF_OK;
}
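
/* A hedged sketch of the program side of this contract, modeled on the
 * kernel's BPF flow dissector selftests (the section name and field access
 * are assumptions from that context, not defined in this file): the program
 * reads packet bytes, fills the bpf_flow_keys wired into the control block
 * above, and returns BPF_OK or BPF_DROP.
 *
 *	SEC("flow_dissector")
 *	int my_dissector(struct __sk_buff *skb)
 *	{
 *		struct bpf_flow_keys *keys = skb->flow_keys;
 *
 *		keys->nhoff = ...;	(advance past the headers parsed)
 *		keys->thoff = ...;	(transport header offset found)
 *		keys->n_proto = ...;	(last network protocol seen)
 *		keys->ip_proto = ...;	(last IP protocol seen)
 *		return BPF_OK;		(BPF_DROP on a parse failure)
 *	}
 */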

/**
 * __skb_flow_dissect - extract the flow_keys struct and return it
 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
 * @flow_dissector: list of keys to dissect
 * @target_container: target structure to put dissected values into
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
 * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 * @flags: any combination of FLOW_DISSECTOR_F_*
 *
 * The function will try to retrieve individual keys into target specified
 * by flow_dissector from either the skbuff or a raw buffer specified by the
 * rest parameters.
 *
 * Caller must take care of zeroing target container memory.
 */
bool __skb_flow_dissect(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
			void *data, __be16 proto, int nhoff, int hlen,
			unsigned int flags)
{
	struct flow_dissector_key_control *key_control;
	struct flow_dissector_key_basic *key_basic;
	struct flow_dissector_key_addrs *key_addrs;
	struct flow_dissector_key_ports *key_ports;
	struct flow_dissector_key_icmp *key_icmp;
	struct flow_dissector_key_tags *key_tags;
	struct flow_dissector_key_vlan *key_vlan;
	enum flow_dissect_ret fdret;
	enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
	int num_hdrs = 0;
	u8 ip_proto = 0;
	bool ret;

	if (!data) {
		data = skb->data;
		proto = skb_vlan_tag_present(skb) ?
			 skb->vlan_proto : skb->protocol;
		nhoff = skb_network_offset(skb);
		hlen = skb_headlen(skb);
#if IS_ENABLED(CONFIG_NET_DSA)
		if (unlikely(skb->dev && netdev_uses_dsa(skb->dev))) {
			const struct dsa_device_ops *ops;
			int offset;

			ops = skb->dev->dsa_ptr->tag_ops;
			if (ops->flow_dissect &&
			    !ops->flow_dissect(skb, &proto, &offset)) {
				hlen -= offset;
				nhoff += offset;
			}
		}
#endif
	}

	/* It is ensured by skb_flow_dissector_init() that control key will
	 * be always present.
	 */
	key_control = skb_flow_dissector_target(flow_dissector,
						FLOW_DISSECTOR_KEY_CONTROL,
						target_container);

	/* It is ensured by skb_flow_dissector_init() that basic key will
	 * be always present.
	 */
	key_basic = skb_flow_dissector_target(flow_dissector,
					      FLOW_DISSECTOR_KEY_BASIC,
					      target_container);

	if (skb) {
		struct bpf_flow_keys flow_keys;
		struct bpf_prog *attached = NULL;

		rcu_read_lock();

		if (skb->dev)
			attached = rcu_dereference(dev_net(skb->dev)->flow_dissector_prog);
		else if (skb->sk)
			attached = rcu_dereference(sock_net(skb->sk)->flow_dissector_prog);
		else
			WARN_ON_ONCE(1);

		if (attached) {
			ret = __skb_flow_bpf_dissect(attached, skb,
						     flow_dissector,
						     &flow_keys);
			__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
						 target_container);
			rcu_read_unlock();
			return ret;
		}
		rcu_read_unlock();
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct ethhdr *eth = eth_hdr(skb);
		struct flow_dissector_key_eth_addrs *key_eth_addrs;

		key_eth_addrs = skb_flow_dissector_target(flow_dissector,
							  FLOW_DISSECTOR_KEY_ETH_ADDRS,
							  target_container);
		memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
	}

proto_again:
	fdret = FLOW_DISSECT_RET_CONTINUE;

	switch (proto) {
	case htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;

		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph || iph->ihl < 5) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		nhoff += iph->ihl * 4;

		ip_proto = iph->protocol;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_IPV4_ADDRS,
							      target_container);

			memcpy(&key_addrs->v4addrs, &iph->saddr,
			       sizeof(key_addrs->v4addrs));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		}

		if (ip_is_fragment(iph)) {
			key_control->flags |= FLOW_DIS_IS_FRAGMENT;

			if (iph->frag_off & htons(IP_OFFSET)) {
				fdret = FLOW_DISSECT_RET_OUT_GOOD;
				break;
			} else {
				key_control->flags |= FLOW_DIS_FIRST_FRAG;
				if (!(flags &
				      FLOW_DISSECTOR_F_PARSE_1ST_FRAG)) {
					fdret = FLOW_DISSECT_RET_OUT_GOOD;
					break;
				}
			}
		}

		__skb_flow_dissect_ipv4(skb, flow_dissector,
					target_container, data, iph);

		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		break;
	}
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;

		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		ip_proto = iph->nexthdr;
		nhoff += sizeof(struct ipv6hdr);

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_IPV6_ADDRS,
							      target_container);

			memcpy(&key_addrs->v6addrs, &iph->saddr,
			       sizeof(key_addrs->v6addrs));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		}

		if ((dissector_uses_key(flow_dissector,
					FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
		     (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
		    ip6_flowlabel(iph)) {
			__be32 flow_label = ip6_flowlabel(iph);

			if (dissector_uses_key(flow_dissector,
					       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
				key_tags = skb_flow_dissector_target(flow_dissector,
								     FLOW_DISSECTOR_KEY_FLOW_LABEL,
								     target_container);
				key_tags->flow_label = ntohl(flow_label);
			}
			if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL) {
				fdret = FLOW_DISSECT_RET_OUT_GOOD;
				break;
			}
		}

		__skb_flow_dissect_ipv6(skb, flow_dissector,
					target_container, data, iph);

		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
			fdret = FLOW_DISSECT_RET_OUT_GOOD;

		break;
	}
	case htons(ETH_P_8021AD):
	case htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan = NULL;
		struct vlan_hdr _vlan;
		__be16 saved_vlan_tpid = proto;

		if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX &&
		    skb && skb_vlan_tag_present(skb)) {
			proto = skb->protocol;
		} else {
			vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
						    data, hlen, &_vlan);
			if (!vlan) {
				fdret = FLOW_DISSECT_RET_OUT_BAD;
				break;
			}

			proto = vlan->h_vlan_encapsulated_proto;
			nhoff += sizeof(*vlan);
		}

		if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX) {
			dissector_vlan = FLOW_DISSECTOR_KEY_VLAN;
		} else if (dissector_vlan == FLOW_DISSECTOR_KEY_VLAN) {
			dissector_vlan = FLOW_DISSECTOR_KEY_CVLAN;
		} else {
			fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
			break;
		}

		if (dissector_uses_key(flow_dissector, dissector_vlan)) {
			key_vlan = skb_flow_dissector_target(flow_dissector,
							     dissector_vlan,
							     target_container);

			if (!vlan) {
				key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
				key_vlan->vlan_priority = skb_vlan_tag_get_prio(skb);
			} else {
				key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
					VLAN_VID_MASK;
				key_vlan->vlan_priority =
					(ntohs(vlan->h_vlan_TCI) &
					 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
			}
			key_vlan->vlan_tpid = saved_vlan_tpid;
		}

		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;
	}
	case htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		switch (proto) {
		case htons(PPP_IP):
			proto = htons(ETH_P_IP);
			fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
			break;
		case htons(PPP_IPV6):
			proto = htons(ETH_P_IPV6);
			fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
			break;
		default:
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}
		break;
	}
	case htons(ETH_P_TIPC): {
		struct tipc_basic_hdr *hdr, _hdr;

		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr),
					   data, hlen, &_hdr);
		if (!hdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_TIPC)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_TIPC,
							      target_container);
			key_addrs->tipckey.key = tipc_hdr_rps_key(hdr);
			key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC;
		}
		fdret = FLOW_DISSECT_RET_OUT_GOOD;
		break;
	}

	case htons(ETH_P_MPLS_UC):
	case htons(ETH_P_MPLS_MC):
		fdret = __skb_flow_dissect_mpls(skb, flow_dissector,
						target_container, data,
						nhoff, hlen);
		break;
	case htons(ETH_P_FCOE):
		if ((hlen - nhoff) < FCOE_HEADER_LEN) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		nhoff += FCOE_HEADER_LEN;
		fdret = FLOW_DISSECT_RET_OUT_GOOD;
		break;

	case htons(ETH_P_ARP):
	case htons(ETH_P_RARP):
		fdret = __skb_flow_dissect_arp(skb, flow_dissector,
					       target_container, data,
					       nhoff, hlen);
		break;

	case htons(ETH_P_BATMAN):
		fdret = __skb_flow_dissect_batadv(skb, key_control, data,
						  &proto, &nhoff, hlen, flags);
		break;

	default:
		fdret = FLOW_DISSECT_RET_OUT_BAD;
		break;
	}

	/* Process result of proto processing */
	switch (fdret) {
	case FLOW_DISSECT_RET_OUT_GOOD:
		goto out_good;
	case FLOW_DISSECT_RET_PROTO_AGAIN:
		if (skb_flow_dissect_allowed(&num_hdrs))
			goto proto_again;
		goto out_good;
	case FLOW_DISSECT_RET_CONTINUE:
	case FLOW_DISSECT_RET_IPPROTO_AGAIN:
		break;
	case FLOW_DISSECT_RET_OUT_BAD:
	default:
		goto out_bad;
	}

ip_proto_again:
	fdret = FLOW_DISSECT_RET_CONTINUE;

	switch (ip_proto) {
	case IPPROTO_GRE:
		fdret = __skb_flow_dissect_gre(skb, key_control, flow_dissector,
					       target_container, data,
					       &proto, &nhoff, &hlen, flags);
		break;

	case NEXTHDR_HOP:
	case NEXTHDR_ROUTING:
	case NEXTHDR_DEST: {
		u8 _opthdr[2], *opthdr;

		if (proto != htons(ETH_P_IPV6))
			break;

		opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
					      data, hlen, &_opthdr);
		if (!opthdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		ip_proto = opthdr[0];
		nhoff += (opthdr[1] + 1) << 3;

		fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
		break;
	}
	case NEXTHDR_FRAGMENT: {
		struct frag_hdr _fh, *fh;

		if (proto != htons(ETH_P_IPV6))
			break;

		fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
					  data, hlen, &_fh);

		if (!fh) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		key_control->flags |= FLOW_DIS_IS_FRAGMENT;

		nhoff += sizeof(_fh);
		ip_proto = fh->nexthdr;

		if (!(fh->frag_off & htons(IP6_OFFSET))) {
			key_control->flags |= FLOW_DIS_FIRST_FRAG;
			if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG) {
				fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
				break;
			}
		}

		fdret = FLOW_DISSECT_RET_OUT_GOOD;
		break;
	}
	case IPPROTO_IPIP:
		proto = htons(ETH_P_IP);

		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;

	case IPPROTO_IPV6:
		proto = htons(ETH_P_IPV6);

		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;

	case IPPROTO_MPLS:
		proto = htons(ETH_P_MPLS_UC);
		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;

	case IPPROTO_TCP:
		__skb_flow_dissect_tcp(skb, flow_dissector, target_container,
				       data, nhoff, hlen);
		break;

	default:
		break;
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS) &&
	    !(key_control->flags & FLOW_DIS_IS_FRAGMENT)) {
		key_ports = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_PORTS,
						      target_container);
		key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
							data, hlen);
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_ICMP)) {
		key_icmp = skb_flow_dissector_target(flow_dissector,
						     FLOW_DISSECTOR_KEY_ICMP,
						     target_container);
		key_icmp->icmp = skb_flow_get_be16(skb, nhoff, data, hlen);
	}

	/* Process result of IP proto processing */
	switch (fdret) {
	case FLOW_DISSECT_RET_PROTO_AGAIN:
		if (skb_flow_dissect_allowed(&num_hdrs))
			goto proto_again;
		break;
	case FLOW_DISSECT_RET_IPPROTO_AGAIN:
		if (skb_flow_dissect_allowed(&num_hdrs))
			goto ip_proto_again;
		break;
	case FLOW_DISSECT_RET_OUT_GOOD:
	case FLOW_DISSECT_RET_CONTINUE:
		break;
	case FLOW_DISSECT_RET_OUT_BAD:
	default:
		goto out_bad;
	}

out_good:
	ret = true;

out:
	key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
	key_basic->n_proto = proto;
	key_basic->ip_proto = ip_proto;

	return ret;

out_bad:
	ret = false;
	goto out;
}
EXPORT_SYMBOL(__skb_flow_dissect);
|
2013-01-21 08:39:24 +08:00
|
|
|
|
|
|
|
static u32 hashrnd __read_mostly;
|
2013-10-24 02:06:00 +08:00
|
|
|
static __always_inline void __flow_hash_secret_init(void)
|
|
|
|
{
|
|
|
|
net_get_random_once(&hashrnd, sizeof(hashrnd));
|
|
|
|
}
|
|
|
|
|
2015-09-02 12:19:17 +08:00
|
|
|
static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
|
|
|
|
u32 keyval)
|
2015-06-05 00:16:39 +08:00
|
|
|
{
|
|
|
|
return jhash2(words, length, keyval);
|
|
|
|
}
|
|
|
|
|
2015-09-02 12:19:17 +08:00
|
|
|
static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
|
2013-10-24 02:06:00 +08:00
|
|
|
{
|
2015-09-02 12:19:17 +08:00
|
|
|
const void *p = flow;
|
|
|
|
|
2015-06-05 00:16:39 +08:00
|
|
|
BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
|
2015-09-02 12:19:17 +08:00
|
|
|
return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
|
2015-06-05 00:16:39 +08:00
|
|
|
}
|
|
|
|
|
2015-09-02 12:19:17 +08:00
|
|
|
static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
|
2015-06-05 00:16:39 +08:00
|
|
|
{
|
2015-06-05 00:16:40 +08:00
|
|
|
size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
|
2015-06-05 00:16:39 +08:00
|
|
|
BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
|
2015-06-05 00:16:40 +08:00
|
|
|
BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
|
|
|
|
sizeof(*flow) - sizeof(flow->addrs));
|
|
|
|
|
|
|
|
switch (flow->control.addr_type) {
|
|
|
|
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
|
|
|
|
diff -= sizeof(flow->addrs.v4addrs);
|
|
|
|
break;
|
|
|
|
case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
|
|
|
|
diff -= sizeof(flow->addrs.v6addrs);
|
|
|
|
break;
|
	case FLOW_DISSECTOR_KEY_TIPC:
		diff -= sizeof(flow->addrs.tipckey);
		break;
	}
	return (sizeof(*flow) - diff) / sizeof(u32);
}
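
/*
 * Worked example: for an IPv4 flow only the v4addrs part of the trailing
 * addrs union carries data, so diff keeps the unused IPv6-sized remainder
 * of the union and the function returns the word count of everything from
 * the hash start up to and including the two IPv4 addresses. The IPv6 and
 * TIPC cases trim correspondingly less.
 */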

__be32 flow_get_u32_src(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.src;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		return (__force __be32)ipv6_addr_hash(
			&flow->addrs.v6addrs.src);
	case FLOW_DISSECTOR_KEY_TIPC:
		return flow->addrs.tipckey.key;
	default:
		return 0;
	}
}
EXPORT_SYMBOL(flow_get_u32_src);

__be32 flow_get_u32_dst(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.dst;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		return (__force __be32)ipv6_addr_hash(
			&flow->addrs.v6addrs.dst);
	default:
		return 0;
	}
}
EXPORT_SYMBOL(flow_get_u32_dst);

static inline void __flow_hash_consistentify(struct flow_keys *keys)
{
	int addr_diff, i;

	switch (keys->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		addr_diff = (__force u32)keys->addrs.v4addrs.dst -
			    (__force u32)keys->addrs.v4addrs.src;
		if ((addr_diff < 0) ||
		    (addr_diff == 0 &&
		     ((__force u16)keys->ports.dst <
		      (__force u16)keys->ports.src))) {
			swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		addr_diff = memcmp(&keys->addrs.v6addrs.dst,
				   &keys->addrs.v6addrs.src,
				   sizeof(keys->addrs.v6addrs.dst));
		if ((addr_diff < 0) ||
		    (addr_diff == 0 &&
		     ((__force u16)keys->ports.dst <
		      (__force u16)keys->ports.src))) {
			for (i = 0; i < 4; i++)
				swap(keys->addrs.v6addrs.src.s6_addr32[i],
				     keys->addrs.v6addrs.dst.s6_addr32[i]);
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	}
}
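
/*
 * Consistentification makes the resulting hash direction-agnostic: the
 * (address, port) pairs are swapped into a canonical order before hashing,
 * so e.g. 10.0.0.2:80 -> 10.0.0.1:12345 and 10.0.0.1:12345 -> 10.0.0.2:80
 * collapse to the same flow_keys and therefore the same hash value.
 */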

static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
{
	u32 hash;

	__flow_hash_consistentify(keys);

	hash = __flow_hash_words(flow_keys_hash_start(keys),
				 flow_keys_hash_length(keys), keyval);
	if (!hash)
		hash = 1;

	return hash;
}

u32 flow_hash_from_keys(struct flow_keys *keys)
{
	__flow_hash_secret_init();
	return __flow_hash_from_keys(keys, hashrnd);
}
EXPORT_SYMBOL(flow_hash_from_keys);
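
/*
 * A minimal usage sketch (hypothetical caller): fill a flow_keys either by
 * dissection or by hand, then derive a hash from it:
 *
 *	struct flow_keys keys;
 *
 *	if (skb_flow_dissect_flow_keys(skb, &keys, 0))
 *		return flow_hash_from_keys(&keys);
 *
 * Since hashrnd is seeded once per boot, the result is only stable within
 * a single boot and must not be persisted or compared across machines.
 */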

static inline u32 ___skb_get_hash(const struct sk_buff *skb,
				  struct flow_keys *keys, u32 keyval)
{
	skb_flow_dissect_flow_keys(skb, keys,
				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(keys, keyval);
}

struct _flow_keys_digest_data {
	__be16	n_proto;
	u8	ip_proto;
	u8	padding;
	__be32	ports;
	__be32	src;
	__be32	dst;
};

void make_flow_keys_digest(struct flow_keys_digest *digest,
			   const struct flow_keys *flow)
{
	struct _flow_keys_digest_data *data =
	    (struct _flow_keys_digest_data *)digest;

	BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));

	memset(digest, 0, sizeof(*digest));

	data->n_proto = flow->basic.n_proto;
	data->ip_proto = flow->basic.ip_proto;
	data->ports = flow->ports.ports;
	data->src = flow->addrs.v4addrs.src;
	data->dst = flow->addrs.v4addrs.dst;
}
EXPORT_SYMBOL(make_flow_keys_digest);
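
/*
 * Note the digest is lossy by design: it keeps n_proto, ip_proto, the port
 * pair and only IPv4-sized address words, presumably so it stays small
 * enough to embed where a full struct flow_keys would not fit.
 */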

static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;

u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
{
	struct flow_keys keys;

	__flow_hash_secret_init();

	memset(&keys, 0, sizeof(keys));
	__skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
			   NULL, 0, 0, 0,
			   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(&keys, hashrnd);
}
EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
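
/*
 * The symmetric variant dissects with flow_keys_dissector_symmetric, which
 * omits direction-sensitive inputs such as the flow label, VLAN and GRE
 * keyid, and __flow_hash_from_keys() then canonicalizes the address/port
 * order, so both directions of a connection yield the same hash. This is
 * what PACKET_FANOUT_HASH relies on to keep both halves of a flow on one
 * fanout socket.
 */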

/**
 * __skb_get_hash - calculate a flow hash
 * @skb: sk_buff to calculate flow hash from
 *
 * This function calculates a flow hash based on src/dst addresses and
 * src/dst port numbers. It sets the hash in @skb to a non-zero value on
 * success; zero indicates that no valid hash could be computed. It also
 * sets l4_hash in @skb if the hash is a canonical 4-tuple hash over
 * transport ports.
 */
void __skb_get_hash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	__flow_hash_secret_init();

	hash = ___skb_get_hash(skb, &keys, hashrnd);

	__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
}
EXPORT_SYMBOL(__skb_get_hash);

__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
{
	struct flow_keys keys;

	return ___skb_get_hash(skb, &keys, perturb);
}
EXPORT_SYMBOL(skb_get_hash_perturb);
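
/*
 * A minimal usage sketch (hypothetical values): callers such as queueing
 * disciplines that want a hash nobody can precompute supply their own,
 * periodically regenerated seed instead of hashrnd:
 *
 *	u32 perturbation = prandom_u32();
 *	u32 h = skb_get_hash_perturb(skb, perturbation);
 *
 * Unlike __skb_get_hash(), the result is deliberately not stored in
 * skb->hash, since it is only meaningful to the owner of the seed.
 */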

u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys_basic *keys, int hlen)
{
	u32 poff = keys->control.thoff;

	/* skip L4 headers for fragments after the first */
	if ((keys->control.flags & FLOW_DIS_IS_FRAGMENT) &&
	    !(keys->control.flags & FLOW_DIS_FIRST_FRAG))
		return poff;

	switch (keys->basic.ip_proto) {
	case IPPROTO_TCP: {
		/* access doff as u8 to avoid unaligned access */
		const u8 *doff;
		u8 _doff;

		doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
					    data, hlen, &_doff);
		if (!doff)
			return poff;

		poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}
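
/*
 * Worked example for the TCP case above: byte 12 of the TCP header holds
 * the 4-bit data offset in its high nibble, counted in 32-bit words. For
 * a header carrying 12 bytes of options, doff is 8, the raw byte reads
 * 0x80, and (0x80 & 0xF0) >> 2 == 32 -- the full header length added to
 * poff, never less than sizeof(struct tcphdr) thanks to max_t().
 */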

/**
 * skb_get_poff - get the offset to the payload
 * @skb: sk_buff to get the payload offset from
 *
 * The function returns the offset to the payload as far as it could be
 * dissected. The main user is currently BPF, which can truncate packets
 * dynamically and analyze headers only, without having to push the actual
 * payload to user space.
 */
u32 skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys_basic keys;

	if (!skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
		return 0;

	return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
}

__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
{
	memset(keys, 0, sizeof(*keys));

	memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
	       sizeof(keys->addrs.v6addrs.src));
	memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
	       sizeof(keys->addrs.v6addrs.dst));
	keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	keys->ports.src = fl6->fl6_sport;
	keys->ports.dst = fl6->fl6_dport;
	keys->keyid.keyid = fl6->fl6_gre_key;
	keys->tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
	keys->basic.ip_proto = fl6->flowi6_proto;

	return flow_hash_from_keys(keys);
}
EXPORT_SYMBOL(__get_hash_from_flowi6);

static const struct flow_dissector_key flow_keys_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_TIPC,
		.offset = offsetof(struct flow_keys, addrs.tipckey),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_VLAN,
		.offset = offsetof(struct flow_keys, vlan),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct flow_keys, keyid),
	},
};

static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
};

static const struct flow_dissector_key flow_keys_basic_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
};

struct flow_dissector flow_keys_dissector __read_mostly;
EXPORT_SYMBOL(flow_keys_dissector);

struct flow_dissector flow_keys_basic_dissector __read_mostly;
EXPORT_SYMBOL(flow_keys_basic_dissector);

static int __init init_default_flow_dissectors(void)
{
	skb_flow_dissector_init(&flow_keys_dissector,
				flow_keys_dissector_keys,
				ARRAY_SIZE(flow_keys_dissector_keys));
	skb_flow_dissector_init(&flow_keys_dissector_symmetric,
				flow_keys_dissector_symmetric_keys,
				ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
	skb_flow_dissector_init(&flow_keys_basic_dissector,
				flow_keys_basic_dissector_keys,
				ARRAY_SIZE(flow_keys_basic_dissector_keys));
	return 0;
}

core_initcall(init_default_flow_dissectors);
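
/*
 * core_initcall() runs early in boot, before subsystem, device and late
 * initcalls, so the default dissectors above are initialized before any
 * networking code that might hash or dissect packets gets to run.
 */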