net: hns3: add udp tunnel checksum segmentation support

Add support for UDP tunnel checksum segmentation on devices that
have the capability to handle it.

Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Huazhong Tan, 2020-11-28 11:51:47 +08:00, committed by Jakub Kicinski
commit 3e2816219d (parent 57e72c121c)
8 changed files with 33 additions and 8 deletions
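Background for the reader: when the stack builds a tunnelled GSO packet (e.g. VXLAN) whose outer UDP header checksum must be computed by the device, it sets SKB_GSO_UDP_TUNNEL_CSUM in the skb's gso_type. A minimal sketch, not part of this patch and using only upstream skbuff helpers, of how a TSO path detects that case:

#include <linux/skbuff.h>

/* Sketch: true when the stack asks the device to fill in the outer
 * UDP header checksum of a tunnelled GSO packet.
 */
static bool needs_outer_udp_csum(const struct sk_buff *skb)
{
	return skb_is_gso(skb) &&
	       (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
}

This is the condition hns3_set_tso() tests below before setting the new OL4CS descriptor bit.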

drivers/net/ethernet/hisilicon/hns3/hnae3.h

@@ -87,6 +87,7 @@ enum HNAE3_DEV_CAP_BITS {
 	HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B,
 	HNAE3_DEV_SUPPORT_HW_PAD_B,
 	HNAE3_DEV_SUPPORT_STASH_B,
+	HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B,
 };
 
 #define hnae3_dev_fd_supported(hdev) \
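Consumers gate features on these capability bits with test_bit() against ae_dev->caps, as the hns3_enet.c hunk further down does. A minimal sketch of the pattern; the helper name is hypothetical, while struct hnae3_ae_dev and the enum come from hnae3.h:

#include <linux/bitops.h>

/* Hypothetical helper: does this hnae3 device support UDP tunnel
 * checksum segmentation?
 */
static bool dev_supports_udp_tunnel_csum(struct hnae3_ae_dev *ae_dev)
{
	return test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps);
}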

drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c

@@ -224,7 +224,8 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
 	dev_info(dev, "(TX)ol2_len: %u\n", tx_desc->tx.ol2_len);
 	dev_info(dev, "(TX)ol3_len: %u\n", tx_desc->tx.ol3_len);
 	dev_info(dev, "(TX)ol4_len: %u\n", tx_desc->tx.ol4_len);
-	dev_info(dev, "(TX)paylen: %u\n", le32_to_cpu(tx_desc->tx.paylen));
+	dev_info(dev, "(TX)paylen_ol4cs: %u\n",
+		 le32_to_cpu(tx_desc->tx.paylen_ol4cs));
 	dev_info(dev, "(TX)vld_ra_ri: %u\n",
 		 le16_to_cpu(tx_desc->tx.bdtp_fe_sc_vld_ra_ri));
 	dev_info(dev, "(TX)mss_hw_csum: %u\n", mss_hw_csum);
@@ -328,6 +329,9 @@ static void hns3_dbg_dev_caps(struct hnae3_handle *h)
 		 test_bit(HNAE3_DEV_SUPPORT_INT_QL_B, caps) ? "yes" : "no");
 	dev_info(&h->pdev->dev, "support HW TX csum: %s\n",
 		 test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, caps) ? "yes" : "no");
+	dev_info(&h->pdev->dev, "support UDP tunnel csum: %s\n",
+		 test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, caps) ?
+		 "yes" : "no");
 }
 
 static void hns3_dbg_dev_specs(struct hnae3_handle *h)

drivers/net/ethernet/hisilicon/hns3/hns3_enet.c

@@ -695,7 +695,7 @@ void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
 	}
 }
 
-static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
+static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs,
 			u16 *mss, u32 *type_cs_vlan_tso)
 {
 	u32 l4_offset, hdr_len;
@@ -723,7 +723,8 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
 	/* tunnel packet */
 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
 					 SKB_GSO_GRE_CSUM |
-					 SKB_GSO_UDP_TUNNEL)) {
+					 SKB_GSO_UDP_TUNNEL |
+					 SKB_GSO_UDP_TUNNEL_CSUM)) {
 		/* reset l3&l4 pointers from outer to inner headers */
 		l3.hdr = skb_inner_network_header(skb);
 		l4.hdr = skb_inner_transport_header(skb);
@@ -752,9 +753,13 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
 	}
 
 	/* find the txbd field values */
-	*paylen = skb->len - hdr_len;
+	*paylen_fdop_ol4cs = skb->len - hdr_len;
 	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);
 
+	/* offload outer UDP header checksum */
+	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
+		hns3_set_field(*paylen_fdop_ol4cs, HNS3_TXD_OL4CS_B, 1);
+
 	/* get MSS for TSO */
 	*mss = skb_shinfo(skb)->gso_size;
@@ -1065,8 +1070,8 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
 			      struct sk_buff *skb, struct hns3_desc *desc)
 {
 	u32 ol_type_vlan_len_msec = 0;
+	u32 paylen_ol4cs = skb->len;
 	u32 type_cs_vlan_tso = 0;
-	u32 paylen = skb->len;
 	u16 mss_hw_csum = 0;
 	u16 inner_vtag = 0;
 	u16 out_vtag = 0;
@@ -1125,7 +1130,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
 			return ret;
 		}
 
-		ret = hns3_set_tso(skb, &paylen, &mss_hw_csum,
+		ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum,
 				   &type_cs_vlan_tso);
 		if (unlikely(ret < 0)) {
 			u64_stats_update_begin(&ring->syncp);
@@ -1140,7 +1145,7 @@ out_hw_tx_csum:
 	desc->tx.ol_type_vlan_len_msec =
 		cpu_to_le32(ol_type_vlan_len_msec);
 	desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
-	desc->tx.paylen = cpu_to_le32(paylen);
+	desc->tx.paylen_ol4cs = cpu_to_le32(paylen_ol4cs);
 	desc->tx.mss_hw_csum = cpu_to_le16(mss_hw_csum);
 	desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
 	desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
@@ -2399,6 +2404,13 @@ static void hns3_set_default_feature(struct net_device *netdev)
 		netdev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 		netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 	}
+
+	if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps)) {
+		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+		netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+		netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+	}
 }
 
 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
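hns3_set_field(), called in the hunks above, ORs a value into a word at a given bit offset; its real definition lives elsewhere in the driver. An approximation of the semantics relied on here (the macro body below is illustrative, not the driver's exact code):

/* Approximation of the driver's hns3_set_field() helper:
 * OR @val into @origin at bit offset @shift.
 */
#define hns3_set_field(origin, shift, val)	((origin) |= (u32)(val) << (shift))

/* As used in hns3_set_tso() to request outer UDP checksum offload:
 *	hns3_set_field(*paylen_fdop_ol4cs, HNS3_TXD_OL4CS_B, 1);
 */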

drivers/net/ethernet/hisilicon/hns3/hns3_enet.h

@@ -172,6 +172,8 @@ enum hns3_nic_state {
 #define HNS3_TXD_DECTTL_S		12
 #define HNS3_TXD_DECTTL_M		(0xf << HNS3_TXD_DECTTL_S)
 
+#define HNS3_TXD_OL4CS_B		22
+
 #define HNS3_TXD_MSS_S			0
 #define HNS3_TXD_MSS_M			(0x3fff << HNS3_TXD_MSS_S)
 #define HNS3_TXD_HW_CS_B		14
@@ -264,7 +266,7 @@ struct __packed hns3_desc {
 			};
 		};
 
-		__le32 paylen;
+		__le32 paylen_ol4cs;
 		__le16 bdtp_fe_sc_vld_ra_ri;
 		__le16 mss_hw_csum;
 	} tx;
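After the rename, payload length and the outer-L4 checksum request share one descriptor dword: the payload length occupies the low bits (a GSO payload is at most 64K, well below bit 22), and HNS3_TXD_OL4CS_B (bit 22) asks hardware to fill the outer UDP checksum. A sketch of the packing, with a hypothetical helper name:

#include <linux/bits.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#define HNS3_TXD_OL4CS_B	22	/* from the hunk above */

/* Hypothetical helper: build the dword written to tx.paylen_ol4cs. */
static __le32 pack_paylen_ol4cs(u32 paylen, bool outer_udp_csum)
{
	u32 dword = paylen;			/* low bits: payload length */

	if (outer_udp_csum)
		dword |= BIT(HNS3_TXD_OL4CS_B);	/* bit 22: OL4CS request */
	return cpu_to_le32(dword);
}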

drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c

@@ -357,6 +357,8 @@ static void hclge_parse_capability(struct hclge_dev *hdev,
 		set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
 	if (hnae3_get_bit(caps, HCLGE_CAP_HW_TX_CSUM_B))
 		set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps);
+	if (hnae3_get_bit(caps, HCLGE_CAP_UDP_TUNNEL_CSUM_B))
+		set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps);
 }
 
 static enum hclge_cmd_status
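The PF hunk above and the VF hunk below translate the firmware capability word into the common HNAE3 bitmap with the same two-line pattern. A condensed sketch; hnae3_get_bit() is approximated (its real definition is in hnae3.h) and the enum constants are the ones this patch adds:

#include <linux/bitops.h>

/* Approximation of hnae3_get_bit(): read one bit of the capability
 * word reported by firmware.
 */
#define hnae3_get_bit(origin, shift)	(((origin) >> (shift)) & 0x1)

/* Condensed form of the PF/VF capability parsing shown above/below. */
static void parse_udp_tunnel_csum_cap(u32 fw_caps, unsigned long *ae_caps)
{
	if (hnae3_get_bit(fw_caps, HCLGE_CAP_UDP_TUNNEL_CSUM_B))
		set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_caps);
}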

drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h

@@ -382,6 +382,7 @@ enum HCLGE_CAP_BITS {
 	HCLGE_CAP_TQP_TXRX_INDEP_B,
 	HCLGE_CAP_HW_PAD_B,
 	HCLGE_CAP_STASH_B,
+	HCLGE_CAP_UDP_TUNNEL_CSUM_B,
 };
 
 #define HCLGE_QUERY_CAP_LENGTH		3

drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c

@@ -338,6 +338,8 @@ static void hclgevf_parse_capability(struct hclgevf_dev *hdev,
 		set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
 	if (hnae3_get_bit(caps, HCLGEVF_CAP_HW_TX_CSUM_B))
 		set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps);
+	if (hnae3_get_bit(caps, HCLGEVF_CAP_UDP_TUNNEL_CSUM_B))
+		set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps);
 }
 
 static int hclgevf_cmd_query_version_and_capability(struct hclgevf_dev *hdev)

drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h

@@ -158,6 +158,7 @@ enum HCLGEVF_CAP_BITS {
 	HCLGEVF_CAP_TQP_TXRX_INDEP_B,
 	HCLGEVF_CAP_HW_PAD_B,
 	HCLGEVF_CAP_STASH_B,
+	HCLGEVF_CAP_UDP_TUNNEL_CSUM_B,
 };
 
 #define HCLGEVF_QUERY_CAP_LENGTH		3