net: hns3: clean up for vlan handling in hns3_fill_desc_vtags
This patch refactors the hns3_fill_desc_vtags function to avoid passing too many parameters, reduce the indentation level, and perform some other cleanup. This patch also adds the hns3_fill_skb_desc function to fill the first descriptor of a skb. Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com> Reviewed-by: Peng Li <lipeng321@huawei.com> Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
130509213b
commit
eb977d996e
|
@ -45,6 +45,9 @@ MODULE_PARM_DESC(debug, " Network interface message level setting");
|
|||
#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
|
||||
NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
|
||||
|
||||
#define HNS3_INNER_VLAN_TAG 1
|
||||
#define HNS3_OUTER_VLAN_TAG 2
|
||||
|
||||
/* hns3_pci_tbl - PCI Device ID Table
|
||||
*
|
||||
* Last entry must be all 0s
|
||||
|
@ -961,16 +964,16 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
|
|||
hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1U);
|
||||
}
|
||||
|
||||
static int hns3_fill_desc_vtags(struct sk_buff *skb,
|
||||
struct hns3_enet_ring *tx_ring,
|
||||
u32 *inner_vlan_flag,
|
||||
u32 *out_vlan_flag,
|
||||
u16 *inner_vtag,
|
||||
u16 *out_vtag)
|
||||
static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
#define HNS3_TX_VLAN_PRIO_SHIFT 13
|
||||
|
||||
struct hnae3_handle *handle = tx_ring->tqp->handle;
|
||||
struct vlan_ethhdr *vhdr;
|
||||
int rc;
|
||||
|
||||
if (!(skb->protocol == htons(ETH_P_8021Q) ||
|
||||
skb_vlan_tag_present(skb)))
|
||||
return 0;
|
||||
|
||||
/* Since HW limitation, if port based insert VLAN enabled, only one VLAN
|
||||
* header is allowed in skb, otherwise it will cause RAS error.
|
||||
|
@ -981,8 +984,7 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
|
|||
return -EINVAL;
|
||||
|
||||
if (skb->protocol == htons(ETH_P_8021Q) &&
|
||||
!(tx_ring->tqp->handle->kinfo.netdev->features &
|
||||
NETIF_F_HW_VLAN_CTAG_TX)) {
|
||||
!(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
|
||||
/* When HW VLAN acceleration is turned off, and the stack
|
||||
* sets the protocol to 802.1q, the driver just need to
|
||||
* set the protocol to the encapsulated ethertype.
|
||||
|
@ -992,59 +994,35 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
|
|||
}
|
||||
|
||||
if (skb_vlan_tag_present(skb)) {
|
||||
u16 vlan_tag;
|
||||
|
||||
vlan_tag = skb_vlan_tag_get(skb);
|
||||
vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
|
||||
|
||||
/* Based on hw strategy, use out_vtag in two layer tag case,
|
||||
* and use inner_vtag in one tag case.
|
||||
*/
|
||||
if (skb->protocol == htons(ETH_P_8021Q)) {
|
||||
if (handle->port_base_vlan_state ==
|
||||
HNAE3_PORT_BASE_VLAN_DISABLE){
|
||||
hns3_set_field(*out_vlan_flag,
|
||||
HNS3_TXD_OVLAN_B, 1);
|
||||
*out_vtag = vlan_tag;
|
||||
} else {
|
||||
hns3_set_field(*inner_vlan_flag,
|
||||
HNS3_TXD_VLAN_B, 1);
|
||||
*inner_vtag = vlan_tag;
|
||||
if (skb->protocol == htons(ETH_P_8021Q) &&
|
||||
handle->port_base_vlan_state ==
|
||||
HNAE3_PORT_BASE_VLAN_DISABLE)
|
||||
rc = HNS3_OUTER_VLAN_TAG;
|
||||
else
|
||||
rc = HNS3_INNER_VLAN_TAG;
|
||||
|
||||
skb->protocol = vlan_get_protocol(skb);
|
||||
return rc;
|
||||
}
|
||||
} else {
|
||||
hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
|
||||
*inner_vtag = vlan_tag;
|
||||
}
|
||||
} else if (skb->protocol == htons(ETH_P_8021Q)) {
|
||||
struct vlan_ethhdr *vhdr;
|
||||
int rc;
|
||||
|
||||
rc = skb_cow_head(skb, 0);
|
||||
if (unlikely(rc < 0))
|
||||
return rc;
|
||||
|
||||
vhdr = (struct vlan_ethhdr *)skb->data;
|
||||
vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
|
||||
<< HNS3_TX_VLAN_PRIO_SHIFT);
|
||||
}
|
||||
vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT)
|
||||
& VLAN_PRIO_MASK);
|
||||
|
||||
skb->protocol = vlan_get_protocol(skb);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
|
||||
unsigned int size, int frag_end,
|
||||
enum hns_desc_type type)
|
||||
static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
|
||||
struct sk_buff *skb, struct hns3_desc *desc)
|
||||
{
|
||||
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
|
||||
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
|
||||
struct device *dev = ring_to_dev(ring);
|
||||
skb_frag_t *frag;
|
||||
unsigned int frag_buf_num;
|
||||
int k, sizeoflast;
|
||||
dma_addr_t dma;
|
||||
|
||||
if (type == DESC_TYPE_SKB) {
|
||||
struct sk_buff *skb = (struct sk_buff *)priv;
|
||||
u32 ol_type_vlan_len_msec = 0;
|
||||
u32 type_cs_vlan_tso = 0;
|
||||
u32 paylen = skb->len;
|
||||
|
@ -1053,11 +1031,21 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
|
|||
u16 mss = 0;
|
||||
int ret;
|
||||
|
||||
ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
|
||||
&ol_type_vlan_len_msec,
|
||||
&inner_vtag, &out_vtag);
|
||||
if (unlikely(ret))
|
||||
ret = hns3_handle_vtags(ring, skb);
|
||||
if (unlikely(ret < 0)) {
|
||||
return ret;
|
||||
} else if (ret == HNS3_INNER_VLAN_TAG) {
|
||||
inner_vtag = skb_vlan_tag_get(skb);
|
||||
inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
|
||||
VLAN_PRIO_MASK;
|
||||
hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
|
||||
} else if (ret == HNS3_OUTER_VLAN_TAG) {
|
||||
out_vtag = skb_vlan_tag_get(skb);
|
||||
out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
|
||||
VLAN_PRIO_MASK;
|
||||
hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
|
||||
1);
|
||||
}
|
||||
|
||||
if (skb->ip_summed == CHECKSUM_PARTIAL) {
|
||||
u8 ol4_proto, il4_proto;
|
||||
|
@ -1089,6 +1077,29 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
|
|||
desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
|
||||
desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
|
||||
unsigned int size, int frag_end,
|
||||
enum hns_desc_type type)
|
||||
{
|
||||
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
|
||||
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
|
||||
struct device *dev = ring_to_dev(ring);
|
||||
skb_frag_t *frag;
|
||||
unsigned int frag_buf_num;
|
||||
int k, sizeoflast;
|
||||
dma_addr_t dma;
|
||||
|
||||
if (type == DESC_TYPE_SKB) {
|
||||
struct sk_buff *skb = (struct sk_buff *)priv;
|
||||
int ret;
|
||||
|
||||
ret = hns3_fill_skb_desc(ring, skb, desc);
|
||||
if (unlikely(ret))
|
||||
return ret;
|
||||
|
||||
dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
|
||||
} else {
|
||||
frag = (skb_frag_t *)priv;
|
||||
|
|
Loading…
Reference in New Issue