tcp: cache result of earlier divides when mss-aligning things
The result is very unlikely to change very often, so we hardly need to divide again after doing that once for a connection. Yet, if a divide still becomes necessary, we detect that, do the right thing, and again settle into the non-divide state. This takes the u16 space which was previously taken by the plain xmit_size_goal.

This should take care of part of the tso vs non-tso difference we found earlier.

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0c54b85f28
commit 2a3a041c4e
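The caching idea described in the message above can be illustrated outside the kernel. The sketch below is hypothetical user-space code (the struct and function names are invented for illustration, not taken from the patch): a per-connection field caches the last segment count, the common path only checks that the cached multiple of the MSS still brackets the current bound, and the divide is taken only when that check fails.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical per-connection state; plays the role of
 * tp->xmit_size_goal_segs in the patch. */
struct conn {
	uint16_t size_goal_segs;
};

/* Return the largest multiple of mss that fits in bound, dividing only
 * when the cached segment count no longer matches. */
static uint32_t size_goal(struct conn *c, uint32_t mss, uint32_t bound)
{
	uint32_t old = (uint32_t)c->size_goal_segs * mss;

	/* Cached goal still valid: old <= bound < old + mss */
	if (old <= bound && old + mss > bound)
		return old;

	/* Slow path: recompute and cache the segment count. */
	c->size_goal_segs = bound / mss;
	return (uint32_t)c->size_goal_segs * mss;
}

int main(void)
{
	struct conn c = { 0 };

	printf("%u\n", size_goal(&c, 1448, 65535)); /* 65160, divide taken */
	printf("%u\n", size_goal(&c, 1448, 65535)); /* 65160, cached, no divide */
	printf("%u\n", size_goal(&c, 1448, 10000)); /*  8688, divide taken again */
	return 0;
}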
@@ -248,6 +248,7 @@ struct tcp_sock {
 	/* inet_connection_sock has to be the first member of tcp_sock */
 	struct inet_connection_sock	inet_conn;
 	u16	tcp_header_len;	/* Bytes of tcp header to send		*/
+	u16	xmit_size_goal_segs; /* Goal for segmenting output packets */
 
 	/*
 	 *	Header prediction flags
@@ -665,7 +665,7 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
 					int large_allowed)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	u32 xmit_size_goal;
+	u32 xmit_size_goal, old_size_goal;
 
 	xmit_size_goal = mss_now;
 
@@ -676,7 +676,17 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
 				  tp->tcp_header_len);
 
 		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
-		xmit_size_goal -= (xmit_size_goal % mss_now);
+
+		/* We try hard to avoid divides here */
+		old_size_goal = tp->xmit_size_goal_segs * mss_now;
+
+		if (likely(old_size_goal <= xmit_size_goal &&
+			   old_size_goal + mss_now > xmit_size_goal)) {
+			xmit_size_goal = old_size_goal;
+		} else {
+			tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
+			xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
+		}
 	}
 
 	return xmit_size_goal;
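A quick sanity check on the fast-path test in the hunk above: for unsigned values and a nonzero mss_now, "old_size_goal <= xmit_size_goal && old_size_goal + mss_now > xmit_size_goal" holds exactly when xmit_size_goal / mss_now equals the cached xmit_size_goal_segs, so taking the cached value yields the same mss-aligned goal a fresh divide would, and the likely() matches the message's observation that the result rarely changes for a connection. The brute-force check below is throwaway test code written for this note, not part of the patch:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Compare the cached-goal test against a real divide for small
	 * values; the two must always agree on whether the cache is valid. */
	for (uint32_t mss = 1; mss <= 64; mss++)
		for (uint32_t segs = 0; segs <= 64; segs++)
			for (uint32_t goal = 0; goal <= 4096; goal++) {
				uint32_t old = segs * mss;
				int fast = (old <= goal && old + mss > goal);
				int divided = ((goal / mss) == segs);
				assert(fast == divided);
			}
	return 0;
}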