bnx2x: fix GRO parameters

bnx2x does an internal GRO pass but doesn't provide gso_segs, thus
breaking qdisc_pkt_len_init() in case ingress qdisc is used.

We store gso_segs in NAPI_GRO_CB(skb)->count, where tcp_gro_complete()
expects to find the number of aggregated segments.

Signed-off-by: Yuval Mintz <yuvalmin@broadcom.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Yuval Mintz 2013-01-17 03:26:21 +00:00 committed by David S. Miller
parent bc6c47b50c
commit cbf1de7232
1 changed file with 23 additions and 24 deletions

View File

@@ -439,31 +439,34 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
  */
 #define TPA_TSTAMP_OPT_LEN	12
 
 /**
- * bnx2x_set_lro_mss - calculate the approximate value of the MSS
+ * bnx2x_set_gro_params - compute GRO values
  *
- * @bp:			driver handle
+ * @skb:		packet skb
  * @parsing_flags:	parsing flags from the START CQE
  * @len_on_bd:		total length of the first packet for the
  *			aggregation.
+ * @pkt_len:		length of all segments
  *
  * Approximate value of the MSS for this aggregation calculated using
  * the first packet of it.
+ * Compute number of aggregated segments, and gso_type
  */
-static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
-			     u16 len_on_bd)
+static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
+				 u16 len_on_bd, unsigned int pkt_len)
 {
-	/*
-	 * TPA arrgregation won't have either IP options or TCP options
+	/* TPA aggregation won't have either IP options or TCP options
 	 * other than timestamp or IPv6 extension headers.
 	 */
 	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
 
 	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
-	    PRS_FLAG_OVERETH_IPV6)
+	    PRS_FLAG_OVERETH_IPV6) {
 		hdrs_len += sizeof(struct ipv6hdr);
-	else /* IPv4 */
+		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+	} else {
 		hdrs_len += sizeof(struct iphdr);
+		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+	}
 
 	/* Check if there was a TCP timestamp, if there is it's will
 	 * always be 12 bytes length: nop nop kind length echo val.
@@ -473,7 +476,13 @@ static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
 	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
 		hdrs_len += TPA_TSTAMP_OPT_LEN;
 
-	return len_on_bd - hdrs_len;
+	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
+
+	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
+	 * to skb_shinfo(skb)->gso_segs
+	 */
+	NAPI_GRO_CB(skb)->count = DIV_ROUND_UP(pkt_len - hdrs_len,
+					       skb_shinfo(skb)->gso_size);
 }
 
 static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
@@ -527,19 +536,9 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	}
 
 	/* This is needed in order to enable forwarding support */
-	if (frag_size) {
-		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
-					tpa_info->parsing_flags, len_on_bd);
-
-		/* set for GRO */
-		if (fp->mode == TPA_MODE_GRO && skb_shinfo(skb)->gso_size)
-			skb_shinfo(skb)->gso_type =
-			    (GET_FLAG(tpa_info->parsing_flags,
-				      PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
-			     PRS_FLAG_OVERETH_IPV6) ?
-				SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
-	}
+	if (frag_size)
+		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
+				     le16_to_cpu(cqe->pkt_len));
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
@@ -651,7 +650,7 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			       struct sk_buff *skb)
 {
 #ifdef CONFIG_INET
-	if (fp->mode == TPA_MODE_GRO && skb_shinfo(skb)->gso_size) {
+	if (skb_shinfo(skb)->gso_size) {
 		skb_set_network_header(skb, 0);
 		switch (be16_to_cpu(skb->protocol)) {
 		case ETH_P_IP: