diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 069e7413f1ef..a45223f0cca5 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1489,9 +1489,10 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
 	int seg_subdescs = 0, desc_cnt = 0;
 	int seg_len, total_len, data_left;
 	int hdr_qentry = qentry;
-	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	int hdr_len;
+
+	hdr_len = tso_start(skb, &tso);
 
-	tso_start(skb, &tso);
 	total_len = skb->len - hdr_len;
 	while (total_len > 0) {
 		char *hdr;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 2d0d313ee7c5..9f80a33c5b16 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -710,8 +710,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
 				   struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
-	int total_len, data_left;
+	int hdr_len, total_len, data_left;
 	struct bufdesc *bdp = txq->bd.cur;
 	struct tso_t tso;
 	unsigned int index = 0;
@@ -731,7 +730,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
 	}
 
 	/* Initialize the TSO handler, and prepare the first payload */
-	tso_start(skb, &tso);
+	hdr_len = tso_start(skb, &tso);
 
 	total_len = skb->len - hdr_len;
 	while (total_len > 0) {
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 4d4b6243318a..90e6111ce534 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -816,10 +816,9 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
 			  struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
-	int total_len, data_left, ret;
+	int hdr_len, total_len, data_left, ret;
 	int desc_count = 0;
 	struct tso_t tso;
-	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 	struct tx_desc *first_tx_desc;
 	u32 first_cmd_sts = 0;
 
@@ -832,7 +831,7 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
 	first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc];
 
 	/* Initialize the TSO handler, and prepare the first payload */
-	tso_start(skb, &tso);
+	hdr_len = tso_start(skb, &tso);
 
 	total_len = skb->len - hdr_len;
 	while (total_len > 0) {
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 946925bbcb2d..95b447c14411 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2604,11 +2604,10 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
 static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
 			 struct mvneta_tx_queue *txq)
 {
-	int total_len, data_left;
+	int hdr_len, total_len, data_left;
 	int desc_count = 0;
 	struct mvneta_port *pp = netdev_priv(dev);
 	struct tso_t tso;
-	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 	int i;
 
 	/* Count needed descriptors */
@@ -2621,7 +2620,7 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
 	}
 
 	/* Initialize the TSO handler, and prepare the first payload */
-	tso_start(skb, &tso);
+	hdr_len = tso_start(skb, &tso);
 
 	total_len = skb->len - hdr_len;
 	while (total_len > 0) {
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 24f4d8e0da98..e9f287568026 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3160,9 +3160,8 @@ static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
 			struct mvpp2_txq_pcpu *txq_pcpu)
 {
 	struct mvpp2_port *port = netdev_priv(dev);
+	int hdr_sz, i, len, descs = 0;
 	struct tso_t tso;
-	int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
-	int i, len, descs = 0;
 
 	/* Check number of available descriptors */
 	if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
@@ -3170,7 +3169,8 @@ static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
 	    tso_count_descs(skb)))
 		return 0;
 
-	tso_start(skb, &tso);
+	hdr_sz = tso_start(skb, &tso);
+
 	len = skb->len - hdr_sz;
 	while (len > 0) {
 		int left = min_t(int, skb_shinfo(skb)->gso_size, len);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index b04f5429d72d..3a5b34a2a7a6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -619,13 +619,14 @@ static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
 			       struct sk_buff *skb, u16 qidx)
 {
 	struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx);
-	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
-	int tcp_data, seg_len, pkt_len, offset;
+	int hdr_len, tcp_data, seg_len, pkt_len, offset;
 	struct nix_sqe_hdr_s *sqe_hdr;
 	int first_sqe = sq->head;
 	struct sg_list list;
 	struct tso_t tso;
 
+	hdr_len = tso_start(skb, &tso);
+
 	/* Map SKB's fragments to DMA.
 	 * It's done here to avoid mapping for every TSO segment's packet.
 	 */
@@ -636,7 +637,6 @@ static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
 
 	netdev_tx_sent_queue(txq, skb->len);
 
-	tso_start(skb, &tso);
 	tcp_data = skb->len - hdr_len;
 	while (tcp_data > 0) {
 		char *hdr;
diff --git a/include/net/tso.h b/include/net/tso.h
index 32d9272ade6a..62c98a9c60f1 100644
--- a/include/net/tso.h
+++ b/include/net/tso.h
@@ -11,6 +11,7 @@ struct tso_t {
 	int	size;
 	void	*data;
 	u16	ip_id;
+	u8	tlen; /* transport header len */
 	bool	ipv6;
 	u32	tcp_seq;
 };
@@ -19,6 +20,6 @@ int tso_count_descs(const struct sk_buff *skb);
 void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso,
 		   int size, bool is_last);
 void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size);
-void tso_start(struct sk_buff *skb, struct tso_t *tso);
+int tso_start(struct sk_buff *skb, struct tso_t *tso);
 
 #endif	/* _TSO_H */
diff --git a/net/core/tso.c b/net/core/tso.c
index 56487e3bb26d..9f35518815bd 100644
--- a/net/core/tso.c
+++ b/net/core/tso.c
@@ -17,7 +17,7 @@ void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso,
 		   int size, bool is_last)
 {
 	struct tcphdr *tcph;
-	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	int hdr_len = skb_transport_offset(skb) + tso->tlen;
 	int mac_hdr_len = skb_network_offset(skb);
 
 	memcpy(hdr, skb->data, hdr_len);
@@ -30,7 +30,7 @@ void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso,
 	} else {
 		struct ipv6hdr *iph = (void *)(hdr + mac_hdr_len);
 
-		iph->payload_len = htons(size + tcp_hdrlen(skb));
+		iph->payload_len = htons(size + tso->tlen);
 	}
 	tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
 	put_unaligned_be32(tso->tcp_seq, &tcph->seq);
@@ -62,10 +62,12 @@ void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size)
 }
 EXPORT_SYMBOL(tso_build_data);
 
-void tso_start(struct sk_buff *skb, struct tso_t *tso)
+int tso_start(struct sk_buff *skb, struct tso_t *tso)
 {
-	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	int tlen = tcp_hdrlen(skb);
+	int hdr_len = skb_transport_offset(skb) + tlen;
 
+	tso->tlen = tlen;
 	tso->ip_id = ntohs(ip_hdr(skb)->id);
 	tso->tcp_seq = ntohl(tcp_hdr(skb)->seq);
 	tso->next_frag_idx = 0;
@@ -83,5 +85,6 @@ void tso_start(struct sk_buff *skb, struct tso_t *tso)
 		tso->data = skb_frag_address(frag);
 		tso->next_frag_idx++;
 	}
+	return hdr_len;
 }
 EXPORT_SYMBOL(tso_start);
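
Note: after this change, callers take the full header length from tso_start()'s return value instead of open-coding skb_transport_offset(skb) + tcp_hdrlen(skb), and tso_build_hdr() reuses the cached tso->tlen. A minimal sketch of the resulting driver-side segmentation loop follows; my_tso_xmit() and the "queue ..." steps are hypothetical placeholders, not part of this patch:

#include <linux/skbuff.h>
#include <net/tso.h>

/* Sketch only: shows the tso_start()/tso_build_hdr()/tso_build_data()
 * calling pattern used by the drivers above after this change.
 */
static void my_tso_xmit(struct sk_buff *skb)
{
	int hdr_len, total_len, data_left;
	struct tso_t tso;

	/* tso_start() now returns the MAC+IP+TCP header length and caches
	 * the transport header length in tso.tlen for tso_build_hdr().
	 */
	hdr_len = tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char hdr[TSO_HEADER_SIZE];

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* Build the per-segment header from the cached lengths */
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		/* ... queue hdr as the segment's first buffer ... */

		while (data_left > 0) {
			int size = min_t(int, tso.size, data_left);

			/* ... queue tso.data / size as payload ... */
			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
	}
}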