#ifndef _NET_IP6_TUNNEL_H
#define _NET_IP6_TUNNEL_H

#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/if_tunnel.h>
#include <linux/ip6_tunnel.h>

#define IP6TUNNEL_ERR_TIMEO (30*HZ)

/* capable of sending packets */
#define IP6_TNL_F_CAP_XMIT 0x10000
/* capable of receiving packets */
#define IP6_TNL_F_CAP_RCV 0x20000
/* determine capability on a per-packet basis */
#define IP6_TNL_F_CAP_PER_PACKET 0x40000

/* Kernel-internal tunnel configuration.  Mirrors the userspace
 * struct ip6_tnl_parm (<linux/ip6_tunnel.h>) and carries additional
 * keying fields used by keyed tunnels (presumably ip6gre — confirm
 * against the .c users).
 */
struct __ip6_tnl_parm {
	char name[IFNAMSIZ];	/* name of tunnel device */
	int link;		/* ifindex of underlying L2 interface */
	__u8 proto;		/* tunnel protocol */
	__u8 encap_limit;	/* encapsulation limit for tunnel */
	__u8 hop_limit;		/* hop limit for tunnel */
	__be32 flowinfo;	/* traffic class and flowlabel for tunnel */
	__u32 flags;		/* tunnel flags */
	struct in6_addr laddr;	/* local tunnel end-point address */
	struct in6_addr raddr;	/* remote tunnel end-point address */

	/* per-direction flags and keys (network byte order) */
	__be16			i_flags;
	__be16			o_flags;
	__be32			i_key;
	__be32			o_key;
};
/* One entry of the per-cpu cached-route array hung off struct ip6_tnl. */
struct ip6_tnl_dst {
	seqlock_t lock;			/* serializes updates of dst/cookie */
	struct dst_entry __rcu *dst;	/* cached route, RCU-managed */
	u32 cookie;			/* validity cookie for the cached dst
					 * (NOTE(review): presumably checked
					 * via dst->ops->check — confirm) */
};
/* IPv6 tunnel */
struct ip6_tnl {
	struct ip6_tnl __rcu *next;	/* next tunnel in list */
	struct net_device *dev;	/* virtual device associated with tunnel */
	struct net *net;	/* netns for packet i/o */
	struct __ip6_tnl_parm parms;	/* tunnel configuration parameters */
	struct flowi fl;	/* flowi template for xmit */
	struct ip6_tnl_dst __percpu *dst_cache;	/* cached dst */

	/* ICMP error bookkeeping; presumably rate-limited against
	 * IP6TUNNEL_ERR_TIMEO by the tunnel drivers — confirm in callers. */
	int err_count;
	unsigned long err_time;

	/* These fields used only by GRE */
	__u32 i_seqno;	/* The last seen seqno */
	__u32 o_seqno;	/* The last output seqno */
	int hlen;       /* Precalculated GRE header length */
	int mlink;	/* NOTE(review): looks like the ifindex of a joined
			 * multicast link — verify against ip6gre code */
};
/* Tunnel encapsulation limit destination sub-option (RFC 2473, sect. 4.1).
 * Wire format — must stay __packed.
 */
struct ipv6_tlv_tnl_enc_lim {
	__u8 type;		/* type-code for option */
	__u8 length;		/* option length */
	__u8 encap_limit;	/* tunnel encapsulation limit */
} __packed;
/* Per-tunnel dst cache management (see struct ip6_tnl_dst). */
struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t);
int ip6_tnl_dst_init(struct ip6_tnl *t);
void ip6_tnl_dst_destroy(struct ip6_tnl *t);
void ip6_tnl_dst_reset(struct ip6_tnl *t);
void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst);

/* rx/tx admission checks for a local/remote address pair. */
int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
		const struct in6_addr *raddr);
int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
		     const struct in6_addr *raddr);

/* Locate the encap-limit TLV (struct ipv6_tlv_tnl_enc_lim) in skb. */
__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);

/* Compute IP6_TNL_F_CAP_* capability bits for an address pair. */
__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
			     const struct in6_addr *raddr);

/* rtnl_link_ops helpers. */
struct net *ip6_tnl_get_link_net(const struct net_device *dev);
int ip6_tnl_get_iflink(const struct net_device *dev);
static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
|
|
|
|
struct net_device *dev)
|
2013-03-10 07:00:39 +08:00
|
|
|
{
|
|
|
|
struct net_device_stats *stats = &dev->stats;
|
|
|
|
int pkt_len, err;
|
|
|
|
|
ip6tunnel: make rx/tx bytes counters consistent
Like the previous patch, which fixes ipv4 tunnels, here is the ipv6 part.
Before the patch, the external ipv6 header + gre header were included on
tx.
After the patch:
$ ping -c1 192.168.6.121 ; ip -s l ls dev ip6gre1
PING 192.168.6.121 (192.168.6.121) 56(84) bytes of data.
64 bytes from 192.168.6.121: icmp_req=1 ttl=64 time=1.92 ms
--- 192.168.6.121 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 1.923/1.923/1.923/0.000 ms
7: ip6gre1@NONE: <POINTOPOINT,NOARP,UP,LOWER_UP> mtu 1440 qdisc noqueue state UNKNOWN mode DEFAULT group default
link/gre6 20:01:06:60:30:08:c1:c3:00:00:00:00:00:00:01:23 peer 20:01:06:60:30:08:c1:c3:00:00:00:00:00:00:01:21
RX: bytes packets errors dropped overrun mcast
84 1 0 0 0 0
TX: bytes packets errors dropped carrier collsns
84 1 0 0 0 0
$ ping -c1 192.168.1.121 ; ip -s l ls dev ip6tnl1
PING 192.168.1.121 (192.168.1.121) 56(84) bytes of data.
64 bytes from 192.168.1.121: icmp_req=1 ttl=64 time=2.28 ms
--- 192.168.1.121 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 2.288/2.288/2.288/0.000 ms
8: ip6tnl1@NONE: <POINTOPOINT,NOARP,UP,LOWER_UP> mtu 1452 qdisc noqueue state UNKNOWN mode DEFAULT group default
link/tunnel6 2001:660:3008:c1c3::123 peer 2001:660:3008:c1c3::121
RX: bytes packets errors dropped overrun mcast
84 1 0 0 0 0
TX: bytes packets errors dropped carrier collsns
84 1 0 0 0 0
Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2015-09-18 17:47:41 +08:00
|
|
|
pkt_len = skb->len - skb_inner_network_offset(skb);
|
2015-10-08 05:48:46 +08:00
|
|
|
err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
|
2013-03-10 07:00:39 +08:00
|
|
|
|
|
|
|
if (net_xmit_eval(err) == 0) {
|
ip_tunnel: disable preemption when updating per-cpu tstats
Drivers like vxlan use the recently introduced
udp_tunnel_xmit_skb/udp_tunnel6_xmit_skb APIs. udp_tunnel6_xmit_skb
makes use of ip6tunnel_xmit, and ip6tunnel_xmit, after sending the
packet, updates the struct stats using the usual
u64_stats_update_begin/end calls on this_cpu_ptr(dev->tstats).
udp_tunnel_xmit_skb makes use of iptunnel_xmit, which doesn't touch
tstats, so drivers like vxlan, immediately after, call
iptunnel_xmit_stats, which does the same thing - calls
u64_stats_update_begin/end on this_cpu_ptr(dev->tstats).
While vxlan is probably fine (I don't know?), calling a similar function
from, say, an unbound workqueue, on a fully preemptable kernel causes
real issues:
[ 188.434537] BUG: using smp_processor_id() in preemptible [00000000] code: kworker/u8:0/6
[ 188.435579] caller is debug_smp_processor_id+0x17/0x20
[ 188.435583] CPU: 0 PID: 6 Comm: kworker/u8:0 Not tainted 4.2.6 #2
[ 188.435607] Call Trace:
[ 188.435611] [<ffffffff8234e936>] dump_stack+0x4f/0x7b
[ 188.435615] [<ffffffff81915f3d>] check_preemption_disabled+0x19d/0x1c0
[ 188.435619] [<ffffffff81915f77>] debug_smp_processor_id+0x17/0x20
The solution would be to protect the whole
this_cpu_ptr(dev->tstats)/u64_stats_update_begin/end blocks with
disabling preemption and then reenabling it.
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2015-11-13 00:35:58 +08:00
|
|
|
struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
|
2013-03-10 07:00:39 +08:00
|
|
|
u64_stats_update_begin(&tstats->syncp);
|
|
|
|
tstats->tx_bytes += pkt_len;
|
|
|
|
tstats->tx_packets++;
|
|
|
|
u64_stats_update_end(&tstats->syncp);
|
ip_tunnel: disable preemption when updating per-cpu tstats
Drivers like vxlan use the recently introduced
udp_tunnel_xmit_skb/udp_tunnel6_xmit_skb APIs. udp_tunnel6_xmit_skb
makes use of ip6tunnel_xmit, and ip6tunnel_xmit, after sending the
packet, updates the struct stats using the usual
u64_stats_update_begin/end calls on this_cpu_ptr(dev->tstats).
udp_tunnel_xmit_skb makes use of iptunnel_xmit, which doesn't touch
tstats, so drivers like vxlan, immediately after, call
iptunnel_xmit_stats, which does the same thing - calls
u64_stats_update_begin/end on this_cpu_ptr(dev->tstats).
While vxlan is probably fine (I don't know?), calling a similar function
from, say, an unbound workqueue, on a fully preemptable kernel causes
real issues:
[ 188.434537] BUG: using smp_processor_id() in preemptible [00000000] code: kworker/u8:0/6
[ 188.435579] caller is debug_smp_processor_id+0x17/0x20
[ 188.435583] CPU: 0 PID: 6 Comm: kworker/u8:0 Not tainted 4.2.6 #2
[ 188.435607] Call Trace:
[ 188.435611] [<ffffffff8234e936>] dump_stack+0x4f/0x7b
[ 188.435615] [<ffffffff81915f3d>] check_preemption_disabled+0x19d/0x1c0
[ 188.435619] [<ffffffff81915f77>] debug_smp_processor_id+0x17/0x20
The solution would be to protect the whole
this_cpu_ptr(dev->tstats)/u64_stats_update_begin/end blocks with
disabling preemption and then reenabling it.
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2015-11-13 00:35:58 +08:00
|
|
|
put_cpu_ptr(tstats);
|
2013-03-10 07:00:39 +08:00
|
|
|
} else {
|
|
|
|
stats->tx_errors++;
|
|
|
|
stats->tx_aborted_errors++;
|
|
|
|
}
|
|
|
|
}
#endif