i40e/i40evf: Add ATR support for tunneled TCP/IPv4/IPv6 packets.
Without this, RSS on the inner headers is the only load balancing that tunneled packets get. With ATR applied to tunneled TCP flows as well, the Tx and Rx queues of a flow are better aligned with the right core/interrupt.

Change-ID: I07d0e0a192faf28fdd33b2f04c32b2a82ff97ddd
Signed-off-by: Anjali Singhai Jain <anjali.singhai@intel.com>
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent e17bc411ae
commit 89232c3bf7
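The core of the change is in i40e_atr(): when the new I40E_TX_FLAGS_VXLAN_TUNNEL flag is set (by i40e_tx_enable_csum(), which now takes tx_flags by pointer), the ATR filter is built from the inner TCP/IP headers instead of the outer ones, and the same change is mirrored in the i40evf driver. The standalone C sketch below illustrates only that header-selection step; the pkt struct, flag values, and helper names are simplified stand-ins for the sk_buff accessors and I40E_TX_FLAGS_* bits, not the driver's actual code.

#include <stdint.h>
#include <stdio.h>

#define TX_FLAG_IPV4         (1u << 1)
#define TX_FLAG_IPV6         (1u << 2)
#define TX_FLAG_VXLAN_TUNNEL (1u << 10)

struct pkt {
        const uint8_t *data;        /* start of the frame             */
        unsigned int net_off;       /* outer network header offset    */
        unsigned int inner_net_off; /* inner network header offset    */
        unsigned int inner_net_len; /* inner network header length    */
};

/* Return the TCP header an ATR-style filter would hash on, or NULL
 * for flows that are not TCP over IPv4/IPv6.
 */
static const uint8_t *atr_l4_header(const struct pkt *p, uint32_t tx_flags)
{
        const uint8_t *net;
        unsigned int hlen;

        if (!(tx_flags & (TX_FLAG_IPV4 | TX_FLAG_IPV6)))
                return NULL;

        if (!(tx_flags & TX_FLAG_VXLAN_TUNNEL)) {
                /* non-tunneled: use the outer (only) network header */
                net = p->data + p->net_off;
                if (tx_flags & TX_FLAG_IPV4)
                        hlen = (net[0] & 0x0F) << 2; /* IHL in 32-bit words */
                else
                        hlen = 40;                   /* fixed IPv6 header   */
        } else {
                /* tunneled: build the filter from the inner headers */
                net = p->data + p->inner_net_off;
                hlen = p->inner_net_len;
        }

        return net + hlen; /* start of the TCP header used for the filter */
}

int main(void)
{
        /* pretend frame: outer IPv4 at offset 14, inner IPv4 at 50 (IHL = 5) */
        uint8_t frame[128] = { [14] = 0x45, [50] = 0x45 };
        struct pkt p = { frame, 14, 50, 20 };

        printf("plain flow hashes TCP at offset %td\n",
               atr_l4_header(&p, TX_FLAG_IPV4) - frame);
        printf("VXLAN flow hashes TCP at offset %td\n",
               atr_l4_header(&p, TX_FLAG_IPV4 | TX_FLAG_VXLAN_TUNNEL) - frame);
        return 0;
}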
@@ -1923,11 +1923,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
  * i40e_atr - Add a Flow Director ATR filter
  * @tx_ring: ring to add programming descriptor to
  * @skb: send buffer
- * @flags: send flags
+ * @tx_flags: send tx flags
  * @protocol: wire protocol
  **/
 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                     u32 flags, __be16 protocol)
+                     u32 tx_flags, __be16 protocol)
 {
         struct i40e_filter_program_desc *fdir_desc;
         struct i40e_pf *pf = tx_ring->vsi->back;
@@ -1952,25 +1952,38 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
         if (!tx_ring->atr_sample_rate)
                 return;
 
-        /* snag network header to get L4 type and address */
-        hdr.network = skb_network_header(skb);
-
-        /* Currently only IPv4/IPv6 with TCP is supported */
-        if (protocol == htons(ETH_P_IP)) {
-                if (hdr.ipv4->protocol != IPPROTO_TCP)
-                        return;
-
-                /* access ihl as a u8 to avoid unaligned access on ia64 */
-                hlen = (hdr.network[0] & 0x0F) << 2;
-        } else if (protocol == htons(ETH_P_IPV6)) {
-                if (hdr.ipv6->nexthdr != IPPROTO_TCP)
-                        return;
-
-                hlen = sizeof(struct ipv6hdr);
-        } else {
-                return;
+        if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
+                return;
+
+        if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
+                /* snag network header to get L4 type and address */
+                hdr.network = skb_network_header(skb);
+
+                /* Currently only IPv4/IPv6 with TCP is supported
+                 * access ihl as u8 to avoid unaligned access on ia64
+                 */
+                if (tx_flags & I40E_TX_FLAGS_IPV4)
+                        hlen = (hdr.network[0] & 0x0F) << 2;
+                else if (protocol == htons(ETH_P_IPV6))
+                        hlen = sizeof(struct ipv6hdr);
+                else
+                        return;
+        } else {
+                hdr.network = skb_inner_network_header(skb);
+                hlen = skb_inner_network_header_len(skb);
         }
 
+        /* Currently only IPv4/IPv6 with TCP is supported
+         * Note: tx_flags gets modified to reflect inner protocols in
+         * tx_enable_csum function if encap is enabled.
+         */
+        if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
+            (hdr.ipv4->protocol != IPPROTO_TCP))
+                return;
+        else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
+                 (hdr.ipv6->nexthdr != IPPROTO_TCP))
+                return;
+
         th = (struct tcphdr *)(hdr.network + hlen);
 
         /* Due to lack of space, no more new filters can be programmed */
@@ -2117,16 +2130,14 @@ out:
  * i40e_tso - set up the tso context descriptor
  * @tx_ring: ptr to the ring to send
  * @skb: ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
  * @hdr_len: ptr to the size of the packet header
  * @cd_tunneling: ptr to context descriptor bits
  *
  * Returns 0 if no TSO can happen, 1 if tso is going, or error
  **/
 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                    u32 tx_flags, __be16 protocol, u8 *hdr_len,
-                    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+                    u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+                    u32 *cd_tunneling)
 {
         u32 cd_cmd, cd_tso_len, cd_mss;
         struct ipv6hdr *ipv6h;
@@ -2218,12 +2229,12 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
 /**
  * i40e_tx_enable_csum - Enable Tx checksum offloads
  * @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
  * @td_cmd: Tx descriptor command bits to set
  * @td_offset: Tx descriptor header offsets to set
  * @cd_tunneling: ptr to context desc bits
  **/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
                                 u32 *td_cmd, u32 *td_offset,
                                 struct i40e_ring *tx_ring,
                                 u32 *cd_tunneling)
@@ -2239,6 +2250,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                 switch (ip_hdr(skb)->protocol) {
                 case IPPROTO_UDP:
                         l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+                        *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
                         break;
                 default:
                         return;
@@ -2248,18 +2260,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                 this_ipv6_hdr = inner_ipv6_hdr(skb);
                 this_tcp_hdrlen = inner_tcp_hdrlen(skb);
 
-                if (tx_flags & I40E_TX_FLAGS_IPV4) {
-
-                        if (tx_flags & I40E_TX_FLAGS_TSO) {
+                if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+                        if (*tx_flags & I40E_TX_FLAGS_TSO) {
                                 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
                                 ip_hdr(skb)->check = 0;
                         } else {
                                 *cd_tunneling |=
                                         I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
                         }
-                } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+                } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
                         *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-                        if (tx_flags & I40E_TX_FLAGS_TSO)
+                        if (*tx_flags & I40E_TX_FLAGS_TSO)
                                 ip_hdr(skb)->check = 0;
                 }
 
@@ -2271,8 +2282,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                                       skb_transport_offset(skb)) >> 1) <<
                                    I40E_TXD_CTX_QW0_NATLEN_SHIFT;
                 if (this_ip_hdr->version == 6) {
-                        tx_flags &= ~I40E_TX_FLAGS_IPV4;
-                        tx_flags |= I40E_TX_FLAGS_IPV6;
+                        *tx_flags &= ~I40E_TX_FLAGS_IPV4;
+                        *tx_flags |= I40E_TX_FLAGS_IPV6;
                 }
         } else {
                 network_hdr_len = skb_network_header_len(skb);
@@ -2282,12 +2293,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
         }
 
         /* Enable IP checksum offloads */
-        if (tx_flags & I40E_TX_FLAGS_IPV4) {
+        if (*tx_flags & I40E_TX_FLAGS_IPV4) {
                 l4_hdr = this_ip_hdr->protocol;
                 /* the stack computes the IP header already, the only time we
                  * need the hardware to recompute it is in the case of TSO.
                  */
-                if (tx_flags & I40E_TX_FLAGS_TSO) {
+                if (*tx_flags & I40E_TX_FLAGS_TSO) {
                         *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
                         this_ip_hdr->check = 0;
                 } else {
@@ -2296,7 +2307,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                         /* Now set the td_offset for IP header length */
                         *td_offset = (network_hdr_len >> 2) <<
                                       I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
-        } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+        } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
                 l4_hdr = this_ipv6_hdr->nexthdr;
                 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
                 /* Now set the td_offset for IP header length */
@@ -2709,7 +2720,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
         else if (protocol == htons(ETH_P_IPV6))
                 tx_flags |= I40E_TX_FLAGS_IPV6;
 
-        tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+        tso = i40e_tso(tx_ring, skb, &hdr_len,
                        &cd_type_cmd_tso_mss, &cd_tunneling);
 
         if (tso < 0)
@@ -2735,7 +2746,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
         if (skb->ip_summed == CHECKSUM_PARTIAL) {
                 tx_flags |= I40E_TX_FLAGS_CSUM;
 
-                i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+                i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
                                     tx_ring, &cd_tunneling);
         }
 
@@ -139,6 +139,7 @@ enum i40e_dyn_idx_t {
 #define I40E_TX_FLAGS_FSO              (u32)(1 << 7)
 #define I40E_TX_FLAGS_TSYN             (u32)(1 << 8)
 #define I40E_TX_FLAGS_FD_SB            (u32)(1 << 9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL     (u32)(1 << 10)
 #define I40E_TX_FLAGS_VLAN_MASK        0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT  29
@@ -1406,16 +1406,14 @@ out:
  * i40e_tso - set up the tso context descriptor
  * @tx_ring: ptr to the ring to send
  * @skb: ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
  * @hdr_len: ptr to the size of the packet header
  * @cd_tunneling: ptr to context descriptor bits
  *
  * Returns 0 if no TSO can happen, 1 if tso is going, or error
  **/
 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                    u32 tx_flags, __be16 protocol, u8 *hdr_len,
-                    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+                    u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+                    u32 *cd_tunneling)
 {
         u32 cd_cmd, cd_tso_len, cd_mss;
         struct ipv6hdr *ipv6h;
@@ -1466,12 +1464,12 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 /**
  * i40e_tx_enable_csum - Enable Tx checksum offloads
  * @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
  * @td_cmd: Tx descriptor command bits to set
  * @td_offset: Tx descriptor header offsets to set
  * @cd_tunneling: ptr to context desc bits
  **/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
                                 u32 *td_cmd, u32 *td_offset,
                                 struct i40e_ring *tx_ring,
                                 u32 *cd_tunneling)
@@ -1487,6 +1485,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                 switch (ip_hdr(skb)->protocol) {
                 case IPPROTO_UDP:
                         l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+                        *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
                         break;
                 default:
                         return;
@@ -1496,18 +1495,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                 this_ipv6_hdr = inner_ipv6_hdr(skb);
                 this_tcp_hdrlen = inner_tcp_hdrlen(skb);
 
-                if (tx_flags & I40E_TX_FLAGS_IPV4) {
-
-                        if (tx_flags & I40E_TX_FLAGS_TSO) {
+                if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+                        if (*tx_flags & I40E_TX_FLAGS_TSO) {
                                 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
                                 ip_hdr(skb)->check = 0;
                         } else {
                                 *cd_tunneling |=
                                         I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
                         }
-                } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+                } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
                         *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-                        if (tx_flags & I40E_TX_FLAGS_TSO)
+                        if (*tx_flags & I40E_TX_FLAGS_TSO)
                                 ip_hdr(skb)->check = 0;
                 }
 
@@ -1519,8 +1517,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                                       skb_transport_offset(skb)) >> 1) <<
                                    I40E_TXD_CTX_QW0_NATLEN_SHIFT;
                 if (this_ip_hdr->version == 6) {
-                        tx_flags &= ~I40E_TX_FLAGS_IPV4;
-                        tx_flags |= I40E_TX_FLAGS_IPV6;
+                        *tx_flags &= ~I40E_TX_FLAGS_IPV4;
+                        *tx_flags |= I40E_TX_FLAGS_IPV6;
                 }
 
 
@@ -1532,12 +1530,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
         }
 
         /* Enable IP checksum offloads */
-        if (tx_flags & I40E_TX_FLAGS_IPV4) {
+        if (*tx_flags & I40E_TX_FLAGS_IPV4) {
                 l4_hdr = this_ip_hdr->protocol;
                 /* the stack computes the IP header already, the only time we
                  * need the hardware to recompute it is in the case of TSO.
                  */
-                if (tx_flags & I40E_TX_FLAGS_TSO) {
+                if (*tx_flags & I40E_TX_FLAGS_TSO) {
                         *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
                         this_ip_hdr->check = 0;
                 } else {
|
||||||
/* Now set the td_offset for IP header length */
|
/* Now set the td_offset for IP header length */
|
||||||
*td_offset = (network_hdr_len >> 2) <<
|
*td_offset = (network_hdr_len >> 2) <<
|
||||||
I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
|
I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
|
||||||
} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
|
} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
|
||||||
l4_hdr = this_ipv6_hdr->nexthdr;
|
l4_hdr = this_ipv6_hdr->nexthdr;
|
||||||
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
|
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
|
||||||
/* Now set the td_offset for IP header length */
|
/* Now set the td_offset for IP header length */
|
||||||
|
@@ -1940,7 +1938,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
         else if (protocol == htons(ETH_P_IPV6))
                 tx_flags |= I40E_TX_FLAGS_IPV6;
 
-        tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+        tso = i40e_tso(tx_ring, skb, &hdr_len,
                        &cd_type_cmd_tso_mss, &cd_tunneling);
 
         if (tso < 0)
@@ -1961,7 +1959,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
         if (skb->ip_summed == CHECKSUM_PARTIAL) {
                 tx_flags |= I40E_TX_FLAGS_CSUM;
 
-                i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+                i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
                                     tx_ring, &cd_tunneling);
         }
 
@@ -138,6 +138,7 @@ enum i40e_dyn_idx_t {
 #define I40E_TX_FLAGS_FCCRC            (u32)(1 << 6)
 #define I40E_TX_FLAGS_FSO              (u32)(1 << 7)
 #define I40E_TX_FLAGS_FD_SB            (u32)(1 << 9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL     (u32)(1 << 10)
 #define I40E_TX_FLAGS_VLAN_MASK        0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT  29