tunnels: Optimize tx path
We currently dirty a cache line to update tunnel device stats (tx_packets/tx_bytes). We are better off using the txq->tx_bytes/tx_packets counters that are already present in the CPU cache, in the cache line shared with txq->_xmit_lock.

This patch extends the IPTUNNEL_XMIT() macro to use the txq pointer provided by the caller. Also, &tunnel->dev->stats can be replaced by &dev->stats.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
16c6cf8bb4
commit
0bfbedb14a
|
@@ -42,9 +42,9 @@ struct ip_tunnel_prl_entry
|
||||||
ip_select_ident(iph, &rt->u.dst, NULL); \
|
ip_select_ident(iph, &rt->u.dst, NULL); \
|
||||||
\
|
\
|
||||||
err = ip_local_out(skb); \
|
err = ip_local_out(skb); \
|
||||||
if (net_xmit_eval(err) == 0) { \
|
if (likely(net_xmit_eval(err) == 0)) { \
|
||||||
stats->tx_bytes += pkt_len; \
|
txq->tx_bytes += pkt_len; \
|
||||||
stats->tx_packets++; \
|
txq->tx_packets++; \
|
||||||
} else { \
|
} else { \
|
||||||
stats->tx_errors++; \
|
stats->tx_errors++; \
|
||||||
stats->tx_aborted_errors++; \
|
stats->tx_aborted_errors++; \
|
||||||
|
|
|
@@ -662,7 +662,8 @@ drop_nolock:
|
||||||
static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
|
static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||||
{
|
{
|
||||||
struct ip_tunnel *tunnel = netdev_priv(dev);
|
struct ip_tunnel *tunnel = netdev_priv(dev);
|
||||||
struct net_device_stats *stats = &tunnel->dev->stats;
|
struct net_device_stats *stats = &dev->stats;
|
||||||
|
struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
|
||||||
struct iphdr *old_iph = ip_hdr(skb);
|
struct iphdr *old_iph = ip_hdr(skb);
|
||||||
struct iphdr *tiph;
|
struct iphdr *tiph;
|
||||||
u8 tos;
|
u8 tos;
|
||||||
|
@@ -810,7 +811,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
|
||||||
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
|
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
|
||||||
if (!new_skb) {
|
if (!new_skb) {
|
||||||
ip_rt_put(rt);
|
ip_rt_put(rt);
|
||||||
stats->tx_dropped++;
|
txq->tx_dropped++;
|
||||||
dev_kfree_skb(skb);
|
dev_kfree_skb(skb);
|
||||||
return NETDEV_TX_OK;
|
return NETDEV_TX_OK;
|
||||||
}
|
}
|
||||||
|
|
|
@@ -390,7 +390,8 @@ static int ipip_rcv(struct sk_buff *skb)
|
||||||
static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
|
static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||||
{
|
{
|
||||||
struct ip_tunnel *tunnel = netdev_priv(dev);
|
struct ip_tunnel *tunnel = netdev_priv(dev);
|
||||||
struct net_device_stats *stats = &tunnel->dev->stats;
|
struct net_device_stats *stats = &dev->stats;
|
||||||
|
struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
|
||||||
struct iphdr *tiph = &tunnel->parms.iph;
|
struct iphdr *tiph = &tunnel->parms.iph;
|
||||||
u8 tos = tunnel->parms.iph.tos;
|
u8 tos = tunnel->parms.iph.tos;
|
||||||
__be16 df = tiph->frag_off;
|
__be16 df = tiph->frag_off;
|
||||||
|
@@ -478,7 +479,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||||
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
|
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
|
||||||
if (!new_skb) {
|
if (!new_skb) {
|
||||||
ip_rt_put(rt);
|
ip_rt_put(rt);
|
||||||
stats->tx_dropped++;
|
txq->tx_dropped++;
|
||||||
dev_kfree_skb(skb);
|
dev_kfree_skb(skb);
|
||||||
return NETDEV_TX_OK;
|
return NETDEV_TX_OK;
|
||||||
}
|
}
|
||||||
|
|
|
@@ -555,7 +555,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
|
||||||
struct net_device *dev)
|
struct net_device *dev)
|
||||||
{
|
{
|
||||||
struct ip_tunnel *tunnel = netdev_priv(dev);
|
struct ip_tunnel *tunnel = netdev_priv(dev);
|
||||||
struct net_device_stats *stats = &tunnel->dev->stats;
|
struct net_device_stats *stats = &dev->stats;
|
||||||
|
struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
|
||||||
struct iphdr *tiph = &tunnel->parms.iph;
|
struct iphdr *tiph = &tunnel->parms.iph;
|
||||||
struct ipv6hdr *iph6 = ipv6_hdr(skb);
|
struct ipv6hdr *iph6 = ipv6_hdr(skb);
|
||||||
u8 tos = tunnel->parms.iph.tos;
|
u8 tos = tunnel->parms.iph.tos;
|
||||||
|
@@ -688,7 +689,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
|
||||||
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
|
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
|
||||||
if (!new_skb) {
|
if (!new_skb) {
|
||||||
ip_rt_put(rt);
|
ip_rt_put(rt);
|
||||||
stats->tx_dropped++;
|
txq->tx_dropped++;
|
||||||
dev_kfree_skb(skb);
|
dev_kfree_skb(skb);
|
||||||
return NETDEV_TX_OK;
|
return NETDEV_TX_OK;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue