vxlan: Eliminate dependency on UDP socket in transmit path

In the vxlan transmit path there is no need to reference the socket
for a tunnel which is needed for the receive side. We do, however,
need the vxlan_dev flags. This patch eliminates references
to the socket in the transmit path, and changes VXLAN_F_UNSHAREABLE
to be VXLAN_F_RCV_FLAGS. This mask is used to store the flags
applicable to receive (GBP, CSUM6_RX, and REMCSUM_RX) in the
vxlan_sock flags.

Signed-off-by: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Tom Herbert 2015-01-20 11:23:05 -08:00 committed by David S. Miller
parent d998f8efa4
commit af33c1adae
3 changed files with 38 additions and 41 deletions

View File

@ -270,12 +270,13 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
__be16 port, u32 flags) __be16 port, u32 flags)
{ {
struct vxlan_sock *vs; struct vxlan_sock *vs;
u32 match_flags = flags & VXLAN_F_UNSHAREABLE;
flags &= VXLAN_F_RCV_FLAGS;
hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) { hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
if (inet_sk(vs->sock->sk)->inet_sport == port && if (inet_sk(vs->sock->sk)->inet_sport == port &&
inet_sk(vs->sock->sk)->sk.sk_family == family && inet_sk(vs->sock->sk)->sk.sk_family == family &&
(vs->flags & VXLAN_F_UNSHAREABLE) == match_flags) vs->flags == flags)
return vs; return vs;
} }
return NULL; return NULL;
@ -1674,7 +1675,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
return false; return false;
} }
static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, struct vxlan_sock *vs, static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
struct vxlan_metadata *md) struct vxlan_metadata *md)
{ {
struct vxlanhdr_gbp *gbp; struct vxlanhdr_gbp *gbp;
@ -1692,21 +1693,20 @@ static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, struct vxlan_sock *vs,
} }
#if IS_ENABLED(CONFIG_IPV6) #if IS_ENABLED(CONFIG_IPV6)
static int vxlan6_xmit_skb(struct vxlan_sock *vs, static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
struct dst_entry *dst, struct sk_buff *skb,
struct net_device *dev, struct in6_addr *saddr, struct net_device *dev, struct in6_addr *saddr,
struct in6_addr *daddr, __u8 prio, __u8 ttl, struct in6_addr *daddr, __u8 prio, __u8 ttl,
__be16 src_port, __be16 dst_port, __be16 src_port, __be16 dst_port,
struct vxlan_metadata *md, bool xnet) struct vxlan_metadata *md, bool xnet, u32 vxflags)
{ {
struct vxlanhdr *vxh; struct vxlanhdr *vxh;
int min_headroom; int min_headroom;
int err; int err;
bool udp_sum = !udp_get_no_check6_tx(vs->sock->sk); bool udp_sum = !(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX);
int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
u16 hdrlen = sizeof(struct vxlanhdr); u16 hdrlen = sizeof(struct vxlanhdr);
if ((vs->flags & VXLAN_F_REMCSUM_TX) && if ((vxflags & VXLAN_F_REMCSUM_TX) &&
skb->ip_summed == CHECKSUM_PARTIAL) { skb->ip_summed == CHECKSUM_PARTIAL) {
int csum_start = skb_checksum_start_offset(skb); int csum_start = skb_checksum_start_offset(skb);
@ -1764,14 +1764,14 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
} }
} }
if (vs->flags & VXLAN_F_GBP) if (vxflags & VXLAN_F_GBP)
vxlan_build_gbp_hdr(vxh, vs, md); vxlan_build_gbp_hdr(vxh, vxflags, md);
skb_set_inner_protocol(skb, htons(ETH_P_TEB)); skb_set_inner_protocol(skb, htons(ETH_P_TEB));
udp_tunnel6_xmit_skb(dst, skb, dev, saddr, daddr, prio, udp_tunnel6_xmit_skb(dst, skb, dev, saddr, daddr, prio,
ttl, src_port, dst_port, ttl, src_port, dst_port,
udp_get_no_check6_tx(vs->sock->sk)); !!(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX));
return 0; return 0;
err: err:
dst_release(dst); dst_release(dst);
@ -1779,20 +1779,19 @@ err:
} }
#endif #endif
int vxlan_xmit_skb(struct vxlan_sock *vs, int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
__be16 src_port, __be16 dst_port, __be16 src_port, __be16 dst_port,
struct vxlan_metadata *md, bool xnet) struct vxlan_metadata *md, bool xnet, u32 vxflags)
{ {
struct vxlanhdr *vxh; struct vxlanhdr *vxh;
int min_headroom; int min_headroom;
int err; int err;
bool udp_sum = !vs->sock->sk->sk_no_check_tx; bool udp_sum = !!(vxflags & VXLAN_F_UDP_CSUM);
int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
u16 hdrlen = sizeof(struct vxlanhdr); u16 hdrlen = sizeof(struct vxlanhdr);
if ((vs->flags & VXLAN_F_REMCSUM_TX) && if ((vxflags & VXLAN_F_REMCSUM_TX) &&
skb->ip_summed == CHECKSUM_PARTIAL) { skb->ip_summed == CHECKSUM_PARTIAL) {
int csum_start = skb_checksum_start_offset(skb); int csum_start = skb_checksum_start_offset(skb);
@ -1844,14 +1843,14 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
} }
} }
if (vs->flags & VXLAN_F_GBP) if (vxflags & VXLAN_F_GBP)
vxlan_build_gbp_hdr(vxh, vs, md); vxlan_build_gbp_hdr(vxh, vxflags, md);
skb_set_inner_protocol(skb, htons(ETH_P_TEB)); skb_set_inner_protocol(skb, htons(ETH_P_TEB));
return udp_tunnel_xmit_skb(rt, skb, src, dst, tos, return udp_tunnel_xmit_skb(rt, skb, src, dst, tos,
ttl, df, src_port, dst_port, xnet, ttl, df, src_port, dst_port, xnet,
vs->sock->sk->sk_no_check_tx); !(vxflags & VXLAN_F_UDP_CSUM));
} }
EXPORT_SYMBOL_GPL(vxlan_xmit_skb); EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
@ -1983,10 +1982,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
md.vni = htonl(vni << 8); md.vni = htonl(vni << 8);
md.gbp = skb->mark; md.gbp = skb->mark;
err = vxlan_xmit_skb(vxlan->vn_sock, rt, skb, err = vxlan_xmit_skb(rt, skb, fl4.saddr,
fl4.saddr, dst->sin.sin_addr.s_addr, dst->sin.sin_addr.s_addr, tos, ttl, df,
tos, ttl, df, src_port, dst_port, &md, src_port, dst_port, &md,
!net_eq(vxlan->net, dev_net(vxlan->dev))); !net_eq(vxlan->net, dev_net(vxlan->dev)),
vxlan->flags);
if (err < 0) { if (err < 0) {
/* skb is already freed. */ /* skb is already freed. */
skb = NULL; skb = NULL;
@ -2042,10 +2042,10 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
md.vni = htonl(vni << 8); md.vni = htonl(vni << 8);
md.gbp = skb->mark; md.gbp = skb->mark;
err = vxlan6_xmit_skb(vxlan->vn_sock, ndst, skb, err = vxlan6_xmit_skb(ndst, skb, dev, &fl6.saddr, &fl6.daddr,
dev, &fl6.saddr, &fl6.daddr, 0, ttl, 0, ttl, src_port, dst_port, &md,
src_port, dst_port, &md, !net_eq(vxlan->net, dev_net(vxlan->dev)),
!net_eq(vxlan->net, dev_net(vxlan->dev))); vxlan->flags);
#endif #endif
} }
@ -2517,15 +2517,11 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
if (ipv6) { if (ipv6) {
udp_conf.family = AF_INET6; udp_conf.family = AF_INET6;
udp_conf.use_udp6_tx_checksums =
!(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
udp_conf.use_udp6_rx_checksums = udp_conf.use_udp6_rx_checksums =
!(flags & VXLAN_F_UDP_ZERO_CSUM6_RX); !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
} else { } else {
udp_conf.family = AF_INET; udp_conf.family = AF_INET;
udp_conf.local_ip.s_addr = INADDR_ANY; udp_conf.local_ip.s_addr = INADDR_ANY;
udp_conf.use_udp_checksums =
!!(flags & VXLAN_F_UDP_CSUM);
} }
udp_conf.local_udp_port = port; udp_conf.local_udp_port = port;
@ -2569,7 +2565,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
atomic_set(&vs->refcnt, 1); atomic_set(&vs->refcnt, 1);
vs->rcv = rcv; vs->rcv = rcv;
vs->data = data; vs->data = data;
vs->flags = flags; vs->flags = (flags & VXLAN_F_RCV_FLAGS);
/* Initialize the vxlan udp offloads structure */ /* Initialize the vxlan udp offloads structure */
vs->udp_offloads.port = port; vs->udp_offloads.port = port;

View File

@ -129,8 +129,12 @@ struct vxlan_sock {
#define VXLAN_F_REMCSUM_RX 0x400 #define VXLAN_F_REMCSUM_RX 0x400
#define VXLAN_F_GBP 0x800 #define VXLAN_F_GBP 0x800
/* These flags must match in order for a socket to be shareable */ /* Flags that are used in the receive patch. These flags must match in
#define VXLAN_F_UNSHAREABLE VXLAN_F_GBP * order for a socket to be shareable
*/
#define VXLAN_F_RCV_FLAGS (VXLAN_F_GBP | \
VXLAN_F_UDP_ZERO_CSUM6_RX | \
VXLAN_F_REMCSUM_RX)
struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
vxlan_rcv_t *rcv, void *data, vxlan_rcv_t *rcv, void *data,
@ -138,11 +142,10 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
void vxlan_sock_release(struct vxlan_sock *vs); void vxlan_sock_release(struct vxlan_sock *vs);
int vxlan_xmit_skb(struct vxlan_sock *vs, int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
__be16 src_port, __be16 dst_port, struct vxlan_metadata *md, __be16 src_port, __be16 dst_port, struct vxlan_metadata *md,
bool xnet); bool xnet, u32 vxflags);
static inline netdev_features_t vxlan_features_check(struct sk_buff *skb, static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
netdev_features_t features) netdev_features_t features)

View File

@ -252,12 +252,10 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
md.vni = htonl(be64_to_cpu(tun_key->tun_id) << 8); md.vni = htonl(be64_to_cpu(tun_key->tun_id) << 8);
md.gbp = vxlan_ext_gbp(skb); md.gbp = vxlan_ext_gbp(skb);
err = vxlan_xmit_skb(vxlan_port->vs, rt, skb, err = vxlan_xmit_skb(rt, skb, fl.saddr, tun_key->ipv4_dst,
fl.saddr, tun_key->ipv4_dst,
tun_key->ipv4_tos, tun_key->ipv4_ttl, df, tun_key->ipv4_tos, tun_key->ipv4_ttl, df,
src_port, dst_port, src_port, dst_port,
&md, &md, false, vxlan_port->exts);
false);
if (err < 0) if (err < 0)
ip_rt_put(rt); ip_rt_put(rt);
return err; return err;