tcp: add tcp_in_slow_start helper
Add a helper to test the slow start condition in various congestion
control modules and other places. This is to prepare a slight improvement
in policy as to exactly when to slow start.

Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Nandita Dukkipati <nanditad@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1007f59dce
commit 071d5080e3
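Before the diff itself, a minimal userspace sketch of the pattern this commit introduces: one shared predicate instead of an open-coded comparison at every call site. This is an illustration only, not kernel code; struct tcp_sock_stub and main() are invented stand-ins for this example, while tcp_in_slow_start() and the snd_cwnd/snd_ssthresh field names mirror the actual patch.

/*
 * Userspace sketch of the helper added to include/net/tcp.h.
 * struct tcp_sock_stub is a stand-in for the real struct tcp_sock.
 */
#include <stdbool.h>
#include <stdio.h>

struct tcp_sock_stub {
        unsigned int snd_cwnd;     /* congestion window, in packets */
        unsigned int snd_ssthresh; /* slow start threshold */
};

/* Mirrors the new helper: in slow start while cwnd has not passed ssthresh. */
static inline bool tcp_in_slow_start(const struct tcp_sock_stub *tp)
{
        return tp->snd_cwnd <= tp->snd_ssthresh;
}

int main(void)
{
        struct tcp_sock_stub tp = { .snd_cwnd = 10, .snd_ssthresh = 64 };

        /* Typical call site, as in the cong_avoid hooks patched below:
         * grow the window exponentially while in slow start, linearly
         * (additive increase) otherwise.
         */
        if (tcp_in_slow_start(&tp))
                printf("slow start: cwnd %u <= ssthresh %u\n",
                       tp.snd_cwnd, tp.snd_ssthresh);
        else
                printf("congestion avoidance\n");
        return 0;
}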
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -989,6 +989,11 @@ static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
 
 #define TCP_INFINITE_SSTHRESH	0x7fffffff
 
+static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
+{
+        return tp->snd_cwnd <= tp->snd_ssthresh;
+}
+
 static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
 {
         return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
@@ -1065,7 +1070,7 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
         const struct tcp_sock *tp = tcp_sk(sk);
 
         /* If in slow start, ensure cwnd grows to twice what was ACKed. */
-        if (tp->snd_cwnd <= tp->snd_ssthresh)
+        if (tcp_in_slow_start(tp))
                 return tp->snd_cwnd < 2 * tp->max_packets_out;
 
         return tp->is_cwnd_limited;
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -146,7 +146,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
         if (!tcp_is_cwnd_limited(sk))
                 return;
 
-        if (tp->snd_cwnd <= tp->snd_ssthresh)
+        if (tcp_in_slow_start(tp))
                 tcp_slow_start(tp, acked);
         else {
                 bictcp_update(ca, tp->snd_cwnd);
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -413,7 +413,7 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                 return;
 
         /* In "safe" area, increase. */
-        if (tp->snd_cwnd <= tp->snd_ssthresh) {
+        if (tcp_in_slow_start(tp)) {
                 acked = tcp_slow_start(tp, acked);
                 if (!acked)
                         return;
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -320,7 +320,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
         if (!tcp_is_cwnd_limited(sk))
                 return;
 
-        if (tp->snd_cwnd <= tp->snd_ssthresh) {
+        if (tcp_in_slow_start(tp)) {
                 if (hystart && after(ack, ca->end_seq))
                         bictcp_hystart_reset(sk);
                 acked = tcp_slow_start(tp, acked);
@@ -439,7 +439,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
                 ca->delay_min = delay;
 
         /* hystart triggers when cwnd is larger than some threshold */
-        if (hystart && tp->snd_cwnd <= tp->snd_ssthresh &&
+        if (hystart && tcp_in_slow_start(tp) &&
             tp->snd_cwnd >= hystart_low_window)
                 hystart_update(sk, delay);
 }
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -116,7 +116,7 @@ static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
         if (!tcp_is_cwnd_limited(sk))
                 return;
 
-        if (tp->snd_cwnd <= tp->snd_ssthresh)
+        if (tcp_in_slow_start(tp))
                 tcp_slow_start(tp, acked);
         else {
                 /* Update AIMD parameters.
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -236,7 +236,7 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
         if (!tcp_is_cwnd_limited(sk))
                 return;
 
-        if (tp->snd_cwnd <= tp->snd_ssthresh)
+        if (tcp_in_slow_start(tp))
                 tcp_slow_start(tp, acked);
         else {
                 /* In dangerous area, increase slowly.
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -268,7 +268,7 @@ static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                 return;
 
         /* In slow start */
-        if (tp->snd_cwnd <= tp->snd_ssthresh)
+        if (tcp_in_slow_start(tp))
                 tcp_slow_start(tp, acked);
 
         else {
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -461,7 +461,7 @@ void tcp_update_metrics(struct sock *sk)
                         tcp_metric_set(tm, TCP_METRIC_CWND,
                                        tp->snd_cwnd);
                 }
-        } else if (tp->snd_cwnd > tp->snd_ssthresh &&
+        } else if (!tcp_in_slow_start(tp) &&
                    icsk->icsk_ca_state == TCP_CA_Open) {
                 /* Cong. avoidance phase, cwnd is reliable. */
                 if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -22,7 +22,7 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
         if (!tcp_is_cwnd_limited(sk))
                 return;
 
-        if (tp->snd_cwnd <= tp->snd_ssthresh)
+        if (tcp_in_slow_start(tp))
                 tcp_slow_start(tp, acked);
         else
                 tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -225,7 +225,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                  */
                 diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
 
-                if (diff > gamma && tp->snd_cwnd <= tp->snd_ssthresh) {
+                if (diff > gamma && tcp_in_slow_start(tp)) {
                         /* Going too fast. Time to slow down
                          * and switch to congestion avoidance.
                          */
@@ -240,7 +240,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                         tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
                         tp->snd_ssthresh = tcp_vegas_ssthresh(tp);
 
-                } else if (tp->snd_cwnd <= tp->snd_ssthresh) {
+                } else if (tcp_in_slow_start(tp)) {
                         /* Slow start. */
                         tcp_slow_start(tp, acked);
                 } else {
@@ -281,7 +281,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                         vegas->minRTT = 0x7fffffff;
                 }
         /* Use normal slow start */
-        else if (tp->snd_cwnd <= tp->snd_ssthresh)
+        else if (tcp_in_slow_start(tp))
                 tcp_slow_start(tp, acked);
 }
 
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -150,7 +150,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 
         veno->diff = (tp->snd_cwnd << V_PARAM_SHIFT) - target_cwnd;
 
-        if (tp->snd_cwnd <= tp->snd_ssthresh) {
+        if (tcp_in_slow_start(tp)) {
                 /* Slow start. */
                 tcp_slow_start(tp, acked);
         } else {
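A note on the motivation: once every call site goes through the helper, the "slight improvement in policy" the commit message alludes to becomes a one-line change instead of a sweep across every congestion control module. A hypothetical illustration, reusing tcp_sock_stub from the sketch above (the strict '<' variant is an example, not part of this commit):

/* Hypothetical policy tweak, shown only to illustrate the benefit of
 * centralizing the predicate; NOT part of this commit. Excluding the
 * boundary case cwnd == ssthresh from slow start would now be a
 * one-line edit to the helper:
 */
static inline bool tcp_in_slow_start_strict(const struct tcp_sock_stub *tp)
{
        return tp->snd_cwnd < tp->snd_ssthresh; /* '<' instead of '<=' */
}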