net: qualcomm: rmnet: Add support for TX checksum offload
TX checksum offload applies to TCP / UDP packets which are not fragmented, using the MAPv4 checksum trailer. The following needs to be done to have the checksum computed in hardware: 1. Set the checksum start offset and insert offset. 2. Set the csum_enabled bit. 3. Compute and set the 1's complement of the partial checksum field in the transport header. Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
23c76eb740
commit
5eb5f8608e
|
@ -141,11 +141,19 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
|
||||||
additional_header_len = 0;
|
additional_header_len = 0;
|
||||||
required_headroom = sizeof(struct rmnet_map_header);
|
required_headroom = sizeof(struct rmnet_map_header);
|
||||||
|
|
||||||
|
if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4) {
|
||||||
|
additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
|
||||||
|
required_headroom += additional_header_len;
|
||||||
|
}
|
||||||
|
|
||||||
if (skb_headroom(skb) < required_headroom) {
|
if (skb_headroom(skb) < required_headroom) {
|
||||||
if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
|
if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
|
||||||
goto fail;
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
|
||||||
|
rmnet_map_checksum_uplink_packet(skb, orig_dev);
|
||||||
|
|
||||||
map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
|
map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
|
||||||
if (!map_header)
|
if (!map_header)
|
||||||
goto fail;
|
goto fail;
|
||||||
|
|
|
@ -89,5 +89,7 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
|
||||||
int hdrlen, int pad);
|
int hdrlen, int pad);
|
||||||
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
|
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
|
||||||
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
|
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
|
||||||
|
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
|
||||||
|
struct net_device *orig_dev);
|
||||||
|
|
||||||
#endif /* _RMNET_MAP_H_ */
|
#endif /* _RMNET_MAP_H_ */
|
||||||
|
|
|
@ -171,6 +171,86 @@ rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
|
||||||
|
{
|
||||||
|
struct iphdr *ip4h = (struct iphdr *)iphdr;
|
||||||
|
void *txphdr;
|
||||||
|
u16 *csum;
|
||||||
|
|
||||||
|
txphdr = iphdr + ip4h->ihl * 4;
|
||||||
|
|
||||||
|
if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
|
||||||
|
csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
|
||||||
|
*csum = ~(*csum);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Fill in the MAPv4 uplink checksum header for an IPv4 packet so the
 * hardware can finish the transport checksum.  Records where the checksum
 * computation starts and where the result must be inserted, flags the
 * packet as checksum-enabled, then hands the IP header off so the partial
 * checksum in the transport header is complemented in place.  Marks the
 * skb CHECKSUM_NONE since the stack no longer needs to finish it.
 */
static void
rmnet_map_ipv4_ul_csum_header(void *iphdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
	struct iphdr *ip4h = (struct iphdr *)iphdr;
	__be16 *hdr = (__be16 *)ul_header, offset;

	/* Byte offset from the start of the IP header to the transport
	 * header — where the hardware begins summing.
	 */
	offset = htons((__force u16)(skb_transport_header(skb) -
				     (unsigned char *)iphdr));
	ul_header->csum_start_offset = offset;
	/* csum_offset locates the checksum field within the transport
	 * header for CHECKSUM_PARTIAL skbs.
	 */
	ul_header->csum_insert_offset = skb->csum_offset;
	ul_header->csum_enabled = 1;
	if (ip4h->protocol == IPPROTO_UDP)
		ul_header->udp_ip4_ind = 1;
	else
		ul_header->udp_ip4_ind = 0;

	/* Changing remaining fields to network order: byte-swap the second
	 * 16-bit word of the header as a unit — presumably it packs
	 * csum_insert_offset and the flag bits; struct layout is not
	 * visible here (TODO confirm against rmnet_map.h).
	 */
	hdr++;
	*hdr = htons((__force u16)*hdr);

	skb->ip_summed = CHECKSUM_NONE;

	rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
}
|
||||||
|
|
||||||
|
#if IS_ENABLED(CONFIG_IPV6)
/* Replace the transport-layer checksum of an IPv6 packet with its one's
 * complement, in place, as the seed the MAPv4 uplink offload hardware
 * expects.  Packets that are neither TCP nor UDP are left untouched.
 *
 * NOTE(review): the transport header is assumed to follow the basic IPv6
 * header directly (nexthdr taken as the transport protocol, no extension
 * headers) — confirm callers guarantee this.
 */
static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
{
	struct ipv6hdr *ip6h = ip6hdr;
	void *txporthdr = ip6hdr + sizeof(struct ipv6hdr);
	u16 *csum_field;

	/* Only TCP and UDP are eligible for uplink checksum offload */
	if (ip6h->nexthdr != IPPROTO_TCP && ip6h->nexthdr != IPPROTO_UDP)
		return;

	csum_field = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
	*csum_field = ~(*csum_field);
}
|
||||||
|
|
||||||
|
/* Fill in the MAPv4 uplink checksum header for an IPv6 packet so the
 * hardware can finish the transport checksum.  Same contract as the IPv4
 * variant, except udp_ip4_ind is always cleared (the indicator applies to
 * IPv4/UDP only, judging by its use in this file).
 */
static void
rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
	__be16 *hdr = (__be16 *)ul_header, offset;

	/* Byte offset from the start of the IPv6 header to the transport
	 * header — where the hardware begins summing.
	 */
	offset = htons((__force u16)(skb_transport_header(skb) -
				     (unsigned char *)ip6hdr));
	ul_header->csum_start_offset = offset;
	/* csum_offset locates the checksum field within the transport
	 * header for CHECKSUM_PARTIAL skbs.
	 */
	ul_header->csum_insert_offset = skb->csum_offset;
	ul_header->csum_enabled = 1;
	ul_header->udp_ip4_ind = 0;

	/* Changing remaining fields to network order: byte-swap the second
	 * 16-bit word of the header as a unit — presumably it packs
	 * csum_insert_offset and the flag bits; struct layout is not
	 * visible here (TODO confirm against rmnet_map.h).
	 */
	hdr++;
	*hdr = htons((__force u16)*hdr);

	skb->ip_summed = CHECKSUM_NONE;

	rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
}
#endif
|
||||||
|
|
||||||
/* Adds MAP header to front of skb->data
|
/* Adds MAP header to front of skb->data
|
||||||
* Padding is calculated and set appropriately in MAP header. Mux ID is
|
* Padding is calculated and set appropriately in MAP header. Mux ID is
|
||||||
* initialized to 0.
|
* initialized to 0.
|
||||||
|
@ -281,3 +361,43 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
 * packets that are supported for UL checksum offload.
 *
 * Pushes a struct rmnet_map_ul_csum_header in front of skb->data and fills
 * it in for hardware checksum offload when possible; otherwise zeroes it so
 * the checksum stays software-computed.  The header is pushed
 * unconditionally — the sw_csum fallback only changes its contents.
 */
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				      struct net_device *orig_dev)
{
	struct rmnet_map_ul_csum_header *ul_header;
	void *iphdr;

	/* Caller is expected to have reserved headroom for this push
	 * (see the egress handler) — TODO confirm.
	 */
	ul_header = (struct rmnet_map_ul_csum_header *)
		    skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));

	/* Offload only if the underlying device advertises TX checksumming */
	if (unlikely(!(orig_dev->features &
		     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
		goto sw_csum;

	/* Only CHECKSUM_PARTIAL skbs carry the partial checksum the
	 * hardware finishes; anything else falls back to software.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* IP header sits immediately after the pushed MAP header */
		iphdr = (char *)ul_header +
			sizeof(struct rmnet_map_ul_csum_header);

		if (skb->protocol == htons(ETH_P_IP)) {
			rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
			return;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_IPV6)
			rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
			return;
#else
			goto sw_csum;
#endif
		}
	}

sw_csum:
	/* Offload not possible: emit an all-zero header (csum_enabled = 0)
	 * so the device treats the packet as already checksummed.
	 */
	ul_header->csum_start_offset = 0;
	ul_header->csum_insert_offset = 0;
	ul_header->csum_enabled = 0;
	ul_header->udp_ip4_ind = 0;
}
|
||||||
|
|
|
@ -189,6 +189,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
|
||||||
return -EBUSY;
|
return -EBUSY;
|
||||||
|
|
||||||
rmnet_dev->hw_features = NETIF_F_RXCSUM;
|
rmnet_dev->hw_features = NETIF_F_RXCSUM;
|
||||||
|
rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
|
||||||
|
|
||||||
rc = register_netdevice(rmnet_dev);
|
rc = register_netdevice(rmnet_dev);
|
||||||
if (!rc) {
|
if (!rc) {
|
||||||
|
|
Loading…
Reference in New Issue