net: hns3: move DMA map into hns3_fill_desc
To fix the L3 checksum error that occurs when the driver does not
clear the L3 checksum, the DMA mapping must be done after calling
skb_cow_head, because skb_cow_head may reallocate the header and move
skb->data, leaving an earlier mapping pointing at stale data.
This patch moves the DMA mapping into hns3_fill_desc to ensure that
the mapping is done after skb_cow_head is called.
Fixes: 76ad4f0ee7 ("net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC")
Signed-off-by: Fuyun Liang <liangfuyun1@huawei.com>
Signed-off-by: Peng Li <lipeng321@huawei.com>
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 5188f218fc (parent 0bbbf15dea)
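For context, and not part of the commit itself: below is a minimal sketch of
the ordering this patch enforces in the TX path. skb_cow_head() may copy the
header to a new buffer and move skb->data, so a DMA mapping taken before it
would point at stale data and the device would transmit the unmodified header.
The helper name tx_map_head() and the zero-headroom argument are assumptions
made for this sketch; they are not hns3 code.

/* Illustrative sketch only, not taken from the hns3 driver. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

static int tx_map_head(struct device *dev, struct sk_buff *skb,
		       dma_addr_t *dma)
{
	int ret;

	/* May copy the header to a new buffer; skb->data can change here. */
	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	/* ... checksum/TSO fields in the header are written here ... */

	/* Map only after every header edit, so the address stays valid. */
	*dma = dma_map_single(dev, skb->data, skb_headlen(skb),
			      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma))
		return -ENOMEM;

	return 0;
}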
@@ -977,18 +977,20 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
 }
 
 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
-			  int size, dma_addr_t dma, int frag_end,
-			  enum hns_desc_type type)
+			  int size, int frag_end, enum hns_desc_type type)
 {
 	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
 	struct hns3_desc *desc = &ring->desc[ring->next_to_use];
+	struct device *dev = ring_to_dev(ring);
 	u32 ol_type_vlan_len_msec = 0;
 	u16 bdtp_fe_sc_vld_ra_ri = 0;
+	struct skb_frag_struct *frag;
 	u32 type_cs_vlan_tso = 0;
 	struct sk_buff *skb;
 	u16 inner_vtag = 0;
 	u16 out_vtag = 0;
 	u32 paylen = 0;
+	dma_addr_t dma;
 	u16 mss = 0;
 	u8 ol4_proto;
 	u8 il4_proto;
@@ -997,11 +999,9 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 	/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
 	desc_cb->priv = priv;
 	desc_cb->length = size;
-	desc_cb->dma = dma;
 	desc_cb->type = type;
 
 	/* now, fill the descriptor */
-	desc->addr = cpu_to_le64(dma);
 	desc->tx.send_size = cpu_to_le16((u16)size);
 	hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
 	desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
@@ -1046,8 +1046,21 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		desc->tx.mss = cpu_to_le16(mss);
 		desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
 		desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
+
+		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+	} else {
+		frag = (struct skb_frag_struct *)priv;
+		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
 	}
 
+	if (dma_mapping_error(ring->dev, dma)) {
+		ring->stats.sw_err_cnt++;
+		return -ENOMEM;
+	}
+
+	desc_cb->dma = dma;
+	desc->addr = cpu_to_le64(dma);
+
 	/* move ring pointer to next.*/
 	ring_ptr_move_fw(ring, next_to_use);
 
@@ -1137,12 +1150,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 	struct hns3_nic_ring_data *ring_data =
 		&tx_ring_data(priv, skb->queue_mapping);
 	struct hns3_enet_ring *ring = ring_data->ring;
-	struct device *dev = priv->dev;
 	struct netdev_queue *dev_queue;
 	struct skb_frag_struct *frag;
 	int next_to_use_head;
 	int next_to_use_frag;
-	dma_addr_t dma;
 	int buf_num;
 	int seg_num;
 	int size;
@@ -1177,15 +1188,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	next_to_use_head = ring->next_to_use;
 
-	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
-	if (dma_mapping_error(dev, dma)) {
-		netdev_err(netdev, "TX head DMA map failed\n");
-		ring->stats.sw_err_cnt++;
-		goto out_err_tx_ok;
-	}
-
-	ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
-				  DESC_TYPE_SKB);
+	ret = priv->ops.fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
+				  DESC_TYPE_SKB);
 	if (ret)
 		goto head_dma_map_err;
 
@@ -1194,15 +1198,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 	for (i = 1; i < seg_num; i++) {
 		frag = &skb_shinfo(skb)->frags[i - 1];
 		size = skb_frag_size(frag);
-		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma)) {
-			netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
-			ring->stats.sw_err_cnt++;
-			goto frag_dma_map_err;
-		}
-		ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
-					  seg_num - 1 == i ? 1 : 0,
-					  DESC_TYPE_PAGE);
+
+		ret = priv->ops.fill_desc(ring, frag, size,
+					  seg_num - 1 == i ? 1 : 0,
+					  DESC_TYPE_PAGE);
 
 		if (ret)
 			goto frag_dma_map_err;
@@ -419,8 +419,7 @@ struct hns3_nic_ring_data {
 
 struct hns3_nic_ops {
 	int (*fill_desc)(struct hns3_enet_ring *ring, void *priv,
-			 int size, dma_addr_t dma, int frag_end,
-			 enum hns_desc_type type);
+			 int size, int frag_end, enum hns_desc_type type);
 	int (*maybe_stop_tx)(struct sk_buff **out_skb,
 			     int *bnum, struct hns3_enet_ring *ring);
 	void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum);