tcp: limit GSO packets to half cwnd
In the DC world, GSO packets initially cooked by tcp_sendmsg() are usually big, as sk_pacing_rate is high. When the network is congested, cwnd can be smaller than the GSO packets found in the socket write queue. tcp_write_xmit() splits GSO packets using the available cwnd, and we end up sending a single GSO packet, consuming all available cwnd. With GRO aggregation on the receiver, we might handle a single GRO packet, sending back a single ACK. 1) This single ACK might be lost; TLP or RTO are then forced to attempt a retransmit. 2) This ACK releases a full cwnd, so the sender sends another big GSO packet, in a ping-pong mode. This behavior does not fill the pipes in the best way, because of scheduling artifacts. Make sure we always have at least two GSO packets in flight. This allows us to safely increase GRO efficiency without risking spurious retransmits. Signed-off-by: Eric Dumazet <edumazet@google.com> Acked-by: Neal Cardwell <ncardwell@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
6eba82248e
commit
d649a7a81f
|
@ -1562,7 +1562,7 @@ static unsigned int tcp_mss_split_point(const struct sock *sk,
|
|||
static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
|
||||
const struct sk_buff *skb)
|
||||
{
|
||||
u32 in_flight, cwnd;
|
||||
u32 in_flight, cwnd, halfcwnd;
|
||||
|
||||
/* Don't be strict about the congestion window for the final FIN. */
|
||||
if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
|
||||
|
@ -1571,10 +1571,14 @@ static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
|
|||
|
||||
in_flight = tcp_packets_in_flight(tp);
|
||||
cwnd = tp->snd_cwnd;
|
||||
if (in_flight < cwnd)
|
||||
return (cwnd - in_flight);
|
||||
|
||||
if (in_flight >= cwnd)
|
||||
return 0;
|
||||
|
||||
/* For better scheduling, ensure we have at least
|
||||
* 2 GSO packets in flight.
|
||||
*/
|
||||
halfcwnd = max(cwnd >> 1, 1U);
|
||||
return min(halfcwnd, cwnd - in_flight);
|
||||
}
|
||||
|
||||
/* Initialize TSO state of a skb.
|
||||
|
|
Loading…
Reference in New Issue