tcp: pass previous skb to tcp_shifted_skb()
No need to recompute the previous skb, as doing so will be a bit more
expensive once the rtx queue is converted to an RB tree.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8ba6ddaaf8
commit f33198163a
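The change is a plain pass-the-pointer refactor: the caller already knows the previous skb, so tcp_shifted_skb() no longer has to look it up itself. A minimal standalone sketch of the pattern, with illustrative names only (this is not the kernel code):

/* Sketch of the refactor pattern; struct node, shift_old and
 * shift_new are illustrative stand-ins, not kernel types.
 */
#include <stdio.h>

struct node {
	struct node *prev;	/* predecessor link, cheap on a list */
	int len;
};

/* Before: the callee recomputes the predecessor on every call. */
static void shift_old(struct node *skb, int shifted)
{
	struct node *prev = skb->prev;	/* becomes a tree walk on an RB tree */

	prev->len += shifted;
	skb->len -= shifted;
}

/* After: the caller passes the predecessor it already holds. */
static void shift_new(struct node *prev, struct node *skb, int shifted)
{
	prev->len += shifted;
	skb->len -= shifted;
}

int main(void)
{
	struct node a = { .prev = NULL, .len = 100 };
	struct node b = { .prev = &a,   .len = 100 };

	shift_old(&b, 10);
	shift_new(&a, &b, 10);	/* same effect, no lookup in the callee */
	printf("a.len=%d b.len=%d\n", a.len, b.len);
	return 0;
}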
@@ -1288,13 +1288,13 @@ static u8 tcp_sacktag_one(struct sock *sk,
 /* Shift newly-SACKed bytes from this skb to the immediately previous
  * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
  */
-static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
+static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
+			    struct sk_buff *skb,
 			    struct tcp_sacktag_state *state,
 			    unsigned int pcount, int shifted, int mss,
 			    bool dup_sack)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
 	u32 start_seq = TCP_SKB_CB(skb)->seq;	/* start of newly-SACKed */
 	u32 end_seq = start_seq + shifted;	/* end of newly-SACKed */
 
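Why the removed lookup matters: on the linked-list write queue, tcp_write_queue_prev() amounts to following one pointer, but once the rtx queue is keyed in an RB tree the predecessor has to come from rb_prev(), which may climb several tree levels. A hedged sketch of what the lookup would turn into (rb_prev() and the rbnode field of struct sk_buff are real; the helper name is hypothetical):

#include <linux/rbtree.h>
#include <linux/skbuff.h>

/* Hypothetical helper: find the predecessor of an skb sitting in an
 * rb-tree rtx queue. rb_prev() can walk several levels instead of
 * dereferencing one ->prev pointer; that is the cost the changelog
 * avoids by passing prev down from the caller.
 */
static struct sk_buff *rtx_queue_prev(struct sk_buff *skb)
{
	struct rb_node *node = rb_prev(&skb->rbnode);

	return node ? rb_entry(node, struct sk_buff, rbnode) : NULL;
}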
@@ -1495,7 +1495,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 
 	if (!skb_shift(prev, skb, len))
 		goto fallback;
-	if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
+	if (!tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss, dup_sack))
 		goto out;
 
 	/* Hole filled allows collapsing with the next as well, this is very
@@ -1514,7 +1514,8 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 		len = skb->len;
 		if (skb_shift(prev, skb, len)) {
 			pcount += tcp_skb_pcount(skb);
-			tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
+			tcp_shifted_skb(sk, prev, skb, state, tcp_skb_pcount(skb),
+					len, mss, 0);
 		}
 
 out:
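Note that both call sites in tcp_shift_skb_data() already have prev in hand: skb_shift(prev, skb, len) runs immediately before each tcp_shifted_skb() call, so forwarding the pointer costs nothing and the callee's own tcp_write_queue_prev() lookup was pure duplication.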