[TCP]: cleanup tcp_{in,out}put.c style

These changes were manually selected from indent's output, which is
too noisy as-is to be of any use without human review. In addition,
some extra newlines between a function and its preceding comment were
removed.

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author:    Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Date:      2007-12-31 14:57:14 -08:00
Committer: David S. Miller
parent 058dc3342b
commit 056834d9f6
2 changed files with 300 additions and 282 deletions
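
Most of the hunks below have one shape: a declaration or call that was
wrapped even though it fits within 80 columns is joined onto a single line.
A minimal, standalone sketch of that pattern, using invented toy_* names
rather than code from the patch:

#include <stdio.h>
#include <stddef.h>

struct toy_sock { int rcv_mss; };
struct toy_buff { size_t len; };

/* Before: the parameter list was wrapped even though the whole
 * declaration fits in 80 columns.
 *
 * static void toy_measure_mss(struct toy_sock *sk,
 *                             const struct toy_buff *skb)
 */

/* After: joined onto one line, mirroring the tcp_measure_rcv_mss()
 * and tcp_grow_window() hunks below.
 */
static void toy_measure_mss(struct toy_sock *sk, const struct toy_buff *skb)
{
	if (skb->len > 0)
		sk->rcv_mss = (int)skb->len;
}

int main(void)
{
	struct toy_sock sk = { 0 };
	struct toy_buff skb = { 536 };

	toy_measure_mss(&sk, &skb);
	printf("rcv_mss guess: %d\n", sk.rcv_mss);
	return 0;
}

The opposite direction also appears: lines that overflow 80 columns are
split, with continuations aligned under the opening parenthesis.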

net/ipv4/tcp_input.c

@ -121,8 +121,7 @@ int sysctl_tcp_abc __read_mostly;
/* Adapt the MSS value used to make delayed ack decision to the
* real world.
*/
static void tcp_measure_rcv_mss(struct sock *sk,
const struct sk_buff *skb)
static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
{
struct inet_connection_sock *icsk = inet_csk(sk);
const unsigned int lss = icsk->icsk_ack.last_seg_size;
@ -303,8 +302,7 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
return 0;
}
static void tcp_grow_window(struct sock *sk,
struct sk_buff *skb)
static void tcp_grow_window(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
@ -323,7 +321,8 @@ static void tcp_grow_window(struct sock *sk,
incr = __tcp_grow_window(sk, skb);
if (incr) {
tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp);
tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
tp->window_clamp);
inet_csk(sk)->icsk_ack.quick |= 1;
}
}
@ -401,7 +400,6 @@ static void tcp_clamp_window(struct sock *sk)
tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
}
/* Initialize RCV_MSS value.
* RCV_MSS is an our guess about MSS used by the peer.
* We haven't any direct information about the MSS.
@ -471,16 +469,15 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
goto new_measure;
if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
return;
tcp_rcv_rtt_update(tp,
jiffies - tp->rcv_rtt_est.time,
1);
tcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1);
new_measure:
tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
tp->rcv_rtt_est.time = tcp_time_stamp;
}
static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, const struct sk_buff *skb)
static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
const struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
if (tp->rx_opt.rcv_tsecr &&
@ -503,8 +500,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
goto new_measure;
time = tcp_time_stamp - tp->rcvq_space.time;
if (time < (tp->rcv_rtt_est.rtt >> 3) ||
tp->rcv_rtt_est.rtt == 0)
if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
return;
space = 2 * (tp->copied_seq - tp->rcvq_space.seq);
@ -1353,12 +1349,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
}
if (in_sack <= 0)
in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq);
in_sack = tcp_match_skb_to_sack(sk, skb, start_seq,
end_seq);
if (unlikely(in_sack < 0))
break;
if (in_sack)
*flag |= tcp_sacktag_one(skb, sk, reord, dup_sack, *fack_count);
*flag |= tcp_sacktag_one(skb, sk, reord, dup_sack,
*fack_count);
*fack_count += tcp_skb_pcount(skb);
}
@ -1407,7 +1405,8 @@ static int tcp_sack_cache_ok(struct tcp_sock *tp, struct tcp_sack_block *cache)
}
static int
tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
u32 prior_snd_una)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
@ -1539,17 +1538,21 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
/* Head todo? */
if (before(start_seq, cache->start_seq)) {
skb = tcp_sacktag_skip(skb, sk, start_seq);
skb = tcp_sacktag_walk(skb, sk, next_dup, start_seq,
cache->start_seq, dup_sack,
&fack_count, &reord, &flag);
skb = tcp_sacktag_walk(skb, sk, next_dup,
start_seq,
cache->start_seq,
dup_sack, &fack_count,
&reord, &flag);
}
/* Rest of the block already fully processed? */
if (!after(end_seq, cache->end_seq))
goto advance_sp;
skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, cache->end_seq,
&fack_count, &reord, &flag);
skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
cache->end_seq,
&fack_count, &reord,
&flag);
/* ...tail remains todo... */
if (tcp_highest_sack_seq(tp) == cache->end_seq) {
@ -2116,8 +2119,7 @@ static int tcp_time_to_recover(struct sock *sk)
* retransmitted past LOST markings in the first place? I'm not fully sure
* about undo and end of connection cases, which can cause R without L?
*/
static void tcp_verify_retransmit_hint(struct tcp_sock *tp,
struct sk_buff *skb)
static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
{
if ((tp->retransmit_skb_hint != NULL) &&
before(TCP_SKB_CB(skb)->seq,
@ -2314,8 +2316,7 @@ static void tcp_undo_cwr(struct sock *sk, const int undo)
static inline int tcp_may_undo(struct tcp_sock *tp)
{
return tp->undo_marker &&
(!tp->undo_retrans || tcp_packet_delayed(tp));
return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
}
/* People celebrate: "We love our President!" */
@ -2480,7 +2481,6 @@ static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}
/* Process an event, which can update packets-in-flight not trivially.
* Main goal of this function is to calculate new estimate for left_out,
* taking into account both packets sitting in receiver's buffer and
@ -2492,8 +2492,7 @@ static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
* It does _not_ decide what to send, it is made in function
* tcp_xmit_retransmit_queue().
*/
static void
tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
@ -2725,7 +2724,8 @@ static void tcp_rearm_rto(struct sock *sk)
if (!tp->packets_out) {
inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
} else {
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
}
}
@ -2803,8 +2803,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets)
flag |= FLAG_RETRANS_DATA_ACKED;
ca_seq_rtt = -1;
seq_rtt = -1;
if ((flag & FLAG_DATA_ACKED) ||
(acked_pcount > 1))
if ((flag & FLAG_DATA_ACKED) || (acked_pcount > 1))
flag |= FLAG_NONHEAD_RETRANS_ACKED;
} else {
ca_seq_rtt = now - scb->when;
@ -2950,8 +2949,9 @@ static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
/* Check that window update is acceptable.
* The function assumes that snd_una<=ack<=snd_next.
*/
static inline int tcp_may_update_window(const struct tcp_sock *tp, const u32 ack,
const u32 ack_seq, const u32 nwin)
static inline int tcp_may_update_window(const struct tcp_sock *tp,
const u32 ack, const u32 ack_seq,
const u32 nwin)
{
return (after(ack, tp->snd_una) ||
after(ack_seq, tp->snd_wl1) ||
@ -3098,9 +3098,11 @@ static int tcp_process_frto(struct sock *sk, int flag)
if ((tp->frto_counter >= 2) &&
(!(flag & FLAG_FORWARD_PROGRESS) ||
((flag&FLAG_DATA_SACKED) && !(flag&FLAG_ONLY_ORIG_SACKED)))) {
((flag & FLAG_DATA_SACKED) &&
!(flag & FLAG_ONLY_ORIG_SACKED)))) {
/* RFC4138 shortcoming (see comment above) */
if (!(flag&FLAG_FORWARD_PROGRESS) && (flag&FLAG_NOT_DUP))
if (!(flag & FLAG_FORWARD_PROGRESS) &&
(flag & FLAG_NOT_DUP))
return 1;
tcp_enter_frto_loss(sk, 3, flag);
@ -3166,7 +3168,8 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
tp->bytes_acked += ack - prior_snd_una;
else if (icsk->icsk_ca_state == TCP_CA_Loss)
/* we assume just one segment left network */
tp->bytes_acked += min(ack - prior_snd_una, tp->mss_cache);
tp->bytes_acked += min(ack - prior_snd_una,
tp->mss_cache);
}
prior_fackets = tp->fackets_out;
@ -3224,7 +3227,8 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
tcp_may_raise_cwnd(sk, flag))
tcp_cong_avoid(sk, ack, prior_in_flight);
tcp_fastretrans_alert(sk, prior_packets - tp->packets_out, flag);
tcp_fastretrans_alert(sk, prior_packets - tp->packets_out,
flag);
} else {
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
tcp_cong_avoid(sk, ack, prior_in_flight);
@ -3255,12 +3259,12 @@ uninteresting_ack:
return 0;
}
/* Look for tcp options. Normally only called on SYN and SYNACK packets.
* But, this can also be called on packets in the established flow when
* the fast version below fails.
*/
void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, int estab)
void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
int estab)
{
unsigned char *ptr;
struct tcphdr *th = tcp_hdr(skb);
@ -3444,7 +3448,8 @@ static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
(s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
}
static inline int tcp_paws_discard(const struct sock *sk, const struct sk_buff *skb)
static inline int tcp_paws_discard(const struct sock *sk,
const struct sk_buff *skb)
{
const struct tcp_sock *tp = tcp_sk(sk);
return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW &&
@ -3577,7 +3582,8 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
}
}
static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq)
static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
u32 end_seq)
{
if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
if (before(seq, sp->start_seq))
@ -3600,7 +3606,8 @@ static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
tp->rx_opt.dsack = 1;
tp->duplicate_sack[0].start_seq = seq;
tp->duplicate_sack[0].end_seq = end_seq;
tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + 1, 4 - tp->rx_opt.tstamp_ok);
tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + 1,
4 - tp->rx_opt.tstamp_ok);
}
}
@ -3653,7 +3660,9 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
* Decrease num_sacks.
*/
tp->rx_opt.num_sacks--;
tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks +
tp->rx_opt.dsack,
4 - tp->rx_opt.tstamp_ok);
for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
sp[i] = sp[i + 1];
continue;
@ -3662,7 +3671,8 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
}
}
static inline void tcp_sack_swap(struct tcp_sack_block *sack1, struct tcp_sack_block *sack2)
static inline void tcp_sack_swap(struct tcp_sack_block *sack1,
struct tcp_sack_block *sack2)
{
__u32 tmp;
@ -3715,7 +3725,8 @@ new_sack:
sp->start_seq = seq;
sp->end_seq = end_seq;
tp->rx_opt.num_sacks++;
tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack,
4 - tp->rx_opt.tstamp_ok);
}
/* RCV.NXT advances, some SACKs should be eaten. */
@ -3752,7 +3763,9 @@ static void tcp_sack_remove(struct tcp_sock *tp)
}
if (num_sacks != tp->rx_opt.num_sacks) {
tp->rx_opt.num_sacks = num_sacks;
tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks +
tp->rx_opt.dsack,
4 - tp->rx_opt.tstamp_ok);
}
}
@ -3981,7 +3994,8 @@ drop:
}
if (after(seq, TCP_SKB_CB(skb1)->seq)) {
/* Partial overlap. */
tcp_dsack_set(tp, seq, TCP_SKB_CB(skb1)->end_seq);
tcp_dsack_set(tp, seq,
TCP_SKB_CB(skb1)->end_seq);
} else {
skb1 = skb1->prev;
}
@ -3993,11 +4007,13 @@ drop:
(struct sk_buff *)&tp->out_of_order_queue &&
after(end_seq, TCP_SKB_CB(skb1)->seq)) {
if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, end_seq);
tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq,
end_seq);
break;
}
__skb_unlink(skb1, &tp->out_of_order_queue);
tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq);
tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq,
TCP_SKB_CB(skb1)->end_seq);
__kfree_skb(skb1);
}
@ -4210,7 +4226,6 @@ static int tcp_prune_queue(struct sock *sk)
return -1;
}
/* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
* As additional protections, we do not touch cwnd in retransmission phases,
* and if application hit its sndbuf limit recently.
@ -4314,8 +4329,7 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
/* We ACK each frame or... */
tcp_in_quickack_mode(sk) ||
/* We have out of order data. */
(ofo_possible &&
skb_peek(&tp->out_of_order_queue))) {
(ofo_possible && skb_peek(&tp->out_of_order_queue))) {
/* Then ack it now */
tcp_send_ack(sk);
} else {
@ -4392,8 +4406,7 @@ static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
* buggy users.
*/
if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
!sock_flag(sk, SOCK_URGINLINE) &&
tp->copied_seq != tp->rcv_nxt) {
!sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) {
struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
tp->copied_seq++;
if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
@ -4458,7 +4471,8 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
return err;
}
static __sum16 __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
static __sum16 __tcp_checksum_complete_user(struct sock *sk,
struct sk_buff *skb)
{
__sum16 result;
@ -4472,14 +4486,16 @@ static __sum16 __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb
return result;
}
static inline int tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
static inline int tcp_checksum_complete_user(struct sock *sk,
struct sk_buff *skb)
{
return !skb_csum_unnecessary(skb) &&
__tcp_checksum_complete_user(sk, skb);
}
#ifdef CONFIG_NET_DMA
static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen)
static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
int hlen)
{
struct tcp_sock *tp = tcp_sk(sk);
int chunk = skb->len - hlen;
@ -4495,7 +4511,9 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen
if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan,
skb, hlen, tp->ucopy.iov, chunk, tp->ucopy.pinned_list);
skb, hlen,
tp->ucopy.iov, chunk,
tp->ucopy.pinned_list);
if (dma_cookie < 0)
goto out;
@ -4646,7 +4664,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
eaten = 1;
}
#endif
if (tp->ucopy.task == current && sock_owned_by_user(sk) && !copied_early) {
if (tp->ucopy.task == current &&
sock_owned_by_user(sk) && !copied_early) {
__set_current_state(TASK_RUNNING);
if (!tcp_copy_to_iovec(sk, skb, tcp_header_len))
@ -4975,7 +4994,8 @@ discard:
}
/* PAWS check. */
if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && tcp_paws_check(&tp->rx_opt, 0))
if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp &&
tcp_paws_check(&tp->rx_opt, 0))
goto discard_and_undo;
if (th->syn) {
@ -5010,7 +5030,6 @@ discard:
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
tcp_initialize_rcv_mss(sk);
tcp_send_synack(sk);
#if 0
/* Note, we could accept data and URG from this segment.
@ -5042,7 +5061,6 @@ reset_and_undo:
return 1;
}
/*
* This function implements the receiving procedure of RFC 793 for
* all states except ESTABLISHED and TIME_WAIT.
@ -5176,8 +5194,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
* and does not calculate rtt.
* Fix it at least with timestamps.
*/
if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
!tp->srtt)
if (tp->rx_opt.saw_tstamp &&
tp->rx_opt.rcv_tsecr && !tp->srtt)
tcp_ack_saw_tstamp(sk, 0);
if (tp->rx_opt.tstamp_ok)

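The tcp_input.c hunks above also normalize spacing around binary operators,
e.g. flag&FLAG_NOT_DUP becomes flag & FLAG_NOT_DUP in the FRTO code. A small
standalone sketch of the same rule, with made-up TOY_FLAG_* names rather than
the kernel's flag bits:

#include <stdio.h>

#define TOY_FLAG_DATA_ACKED	0x1
#define TOY_FLAG_NOT_DUP	0x2

int main(void)
{
	int flag = TOY_FLAG_DATA_ACKED | TOY_FLAG_NOT_DUP;

	/* Before: no spaces around the bitwise AND. */
	if (flag&TOY_FLAG_NOT_DUP)
		printf("matches (old style)\n");

	/* After: spaces around binary operators, as in the hunks above. */
	if (flag & TOY_FLAG_NOT_DUP)
		printf("matches (preferred style)\n");

	return 0;
}

Both forms compile identically; the change is purely about readability and
consistency with the rest of the file.
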
net/ipv4/tcp_output.c

@ -278,8 +278,7 @@ static u16 tcp_select_window(struct sock *sk)
return new_win;
}
static inline void TCP_ECN_send_synack(struct tcp_sock *tp,
struct sk_buff *skb)
static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
{
TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
if (!(tp->ecn_flags & TCP_ECN_OK))
@ -447,7 +446,8 @@ static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
* We are working here with either a clone of the original
* SKB, or a fresh unique copy made by the retransmit engine.
*/
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, gfp_t gfp_mask)
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
gfp_t gfp_mask)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct inet_sock *inet;
@ -616,7 +616,6 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
#undef SYSCTL_FLAG_SACK
}
/* This routine just queue's the buffer
*
* NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
@ -634,7 +633,8 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
sk_mem_charge(sk, skb->truesize);
}
static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
unsigned int mss_now)
{
if (skb->len <= mss_now || !sk_can_gso(sk)) {
/* Avoid the costly divide in the normal
@ -670,7 +670,8 @@ static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
* packet to the list. This won't be called frequently, I hope.
* Remember, these are still headerless SKBs at this point.
*/
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
unsigned int mss_now)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *buff;
@ -714,7 +715,8 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
/* Copy and checksum data tail into the new buffer. */
buff->csum = csum_partial_copy_nocheck(skb->data + len, skb_put(buff, nsize),
buff->csum = csum_partial_copy_nocheck(skb->data + len,
skb_put(buff, nsize),
nsize, 0);
skb_trim(skb, len);
@ -804,8 +806,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
if (skb_cloned(skb) &&
pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
return -ENOMEM;
/* If len == headlen, we avoid __skb_pull to preserve alignment. */
@ -909,7 +910,6 @@ void tcp_mtup_init(struct sock *sk)
NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
are READ ONLY outside this function. --ANK (980731)
*/
unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
struct tcp_sock *tp = tcp_sk(sk);
@ -977,8 +977,7 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
inet_csk(sk)->icsk_ext_hdr_len -
tp->tcp_header_len);
if (tp->max_window &&
(xmit_size_goal > (tp->max_window >> 1)))
if (tp->max_window && (xmit_size_goal > (tp->max_window >> 1)))
xmit_size_goal = max((tp->max_window >> 1),
68U - tp->tcp_header_len);
@ -990,7 +989,6 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
}
/* Congestion window validation. (RFC2861) */
static void tcp_cwnd_validate(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
@ -1017,8 +1015,7 @@ static void tcp_cwnd_validate(struct sock *sk)
* per input skb which could be mostly avoided here (if desired).
*/
static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
unsigned int mss_now,
unsigned int cwnd)
unsigned int mss_now, unsigned int cwnd)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 needed, window, cwnd_len;
@ -1039,7 +1036,8 @@ static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
/* Can at least one segment of SKB be sent right now, according to the
* congestion window rules? If so, return how many segments are allowed.
*/
static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *skb)
static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
struct sk_buff *skb)
{
u32 in_flight, cwnd;
@ -1059,13 +1057,12 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *sk
/* This must be invoked the first time we consider transmitting
* SKB onto the wire.
*/
static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
unsigned int mss_now)
{
int tso_segs = tcp_skb_pcount(skb);
if (!tso_segs ||
(tso_segs > 1 &&
tcp_skb_mss(skb) != mss_now)) {
if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
tcp_set_skb_tso_segs(sk, skb, mss_now);
tso_segs = tcp_skb_pcount(skb);
}
@ -1085,16 +1082,13 @@ static inline int tcp_minshall_check(const struct tcp_sock *tp)
* 4. Or TCP_CORK is not set, and all sent packets are ACKed.
* With Minshall's modification: all sent small packets are ACKed.
*/
static inline int tcp_nagle_check(const struct tcp_sock *tp,
const struct sk_buff *skb,
unsigned mss_now, int nonagle)
{
return (skb->len < mss_now &&
((nonagle & TCP_NAGLE_CORK) ||
(!nonagle &&
tp->packets_out &&
tcp_minshall_check(tp))));
(!nonagle && tp->packets_out && tcp_minshall_check(tp))));
}
/* Return non-zero if the Nagle test allows this packet to be
@ -1126,7 +1120,8 @@ static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
}
/* Does at least the first segment of SKB fit into the send window? */
static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, unsigned int cur_mss)
static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb,
unsigned int cur_mss)
{
u32 end_seq = TCP_SKB_CB(skb)->end_seq;
@ -1152,8 +1147,7 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
return 0;
cwnd_quota = tcp_cwnd_test(tp, skb);
if (cwnd_quota &&
!tcp_snd_wnd_test(tp, skb, cur_mss))
if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
cwnd_quota = 0;
return cwnd_quota;
@ -1177,7 +1171,8 @@ int tcp_may_send_now(struct sock *sk)
* know that all the data is in scatter-gather pages, and that the
* packet has never been sent out before (and thus is not cloned).
*/
static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now)
static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
unsigned int mss_now)
{
struct sk_buff *buff;
int nlen = skb->len - len;
@ -1247,8 +1242,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
in_flight = tcp_packets_in_flight(tp);
BUG_ON(tcp_skb_pcount(skb) <= 1 ||
(tp->snd_cwnd <= in_flight));
BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));
send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
@ -1293,7 +1287,8 @@ send_now:
/* Create a new MTU probe if we are ready.
* Returns 0 if we should wait to probe (no cwnd available),
* 1 if a probe was sent,
* -1 otherwise */
* -1 otherwise
*/
static int tcp_mtu_probe(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
@ -1366,7 +1361,8 @@ static int tcp_mtu_probe(struct sock *sk)
skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
else
nskb->csum = skb_copy_and_csum_bits(skb, 0,
skb_put(nskb, copy), copy, nskb->csum);
skb_put(nskb, copy),
copy, nskb->csum);
if (skb->len <= copy) {
/* We've eaten all the data from this skb.
@ -1380,7 +1376,8 @@ static int tcp_mtu_probe(struct sock *sk)
if (!skb_shinfo(skb)->nr_frags) {
skb_pull(skb, copy);
if (skb->ip_summed != CHECKSUM_PARTIAL)
skb->csum = csum_partial(skb->data, skb->len, 0);
skb->csum = csum_partial(skb->data,
skb->len, 0);
} else {
__pskb_trim_head(skb, copy);
tcp_set_skb_tso_segs(sk, skb, mss_now);
@ -1414,7 +1411,6 @@ static int tcp_mtu_probe(struct sock *sk)
return -1;
}
/* This routine writes packets to the network. It advances the
* send_head. This happens as incoming acks open up the remote
* window for us.
@ -1626,7 +1622,8 @@ u32 __tcp_select_window(struct sock *sk)
icsk->icsk_ack.quick = 0;
if (tcp_memory_pressure)
tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);
tp->rcv_ssthresh = min(tp->rcv_ssthresh,
4U * tp->advmss);
if (free_space < mss)
return 0;
@ -1669,7 +1666,8 @@ u32 __tcp_select_window(struct sock *sk)
}
/* Attempt to collapse two adjacent SKB's during retransmission. */
static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now)
static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb,
int mss_now)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
@ -1846,8 +1844,10 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
(skb->len < (cur_mss >> 1)) &&
(tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) &&
(!tcp_skb_is_last(sk, skb)) &&
(skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) &&
(tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(tcp_write_queue_next(sk, skb)) == 1) &&
(skb_shinfo(skb)->nr_frags == 0 &&
skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) &&
(tcp_skb_pcount(skb) == 1 &&
tcp_skb_pcount(tcp_write_queue_next(sk, skb)) == 1) &&
(sysctl_tcp_retrans_collapse != 0))
tcp_retrans_try_collapse(sk, skb, cur_mss);
@ -2036,7 +2036,6 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
}
}
/* Send a fin. The caller locks the socket for us. This cannot be
* allowed to fail queueing a FIN frame under any circumstances.
*/
@ -2374,7 +2373,8 @@ void tcp_send_delayed_ack(struct sock *sk)
const struct tcp_sock *tp = tcp_sk(sk);
int max_ato = HZ / 2;
if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
if (icsk->icsk_ack.pingpong ||
(icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
max_ato = TCP_DELACK_MAX;
/* Slow path, intersegment interval is "high". */
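
Where a call genuinely does not fit in 80 columns, the tcp_output.c hunks
wrap it instead, with continuation lines aligned under the opening
parenthesis (see the csum_partial_copy_nocheck() and skb_copy_and_csum_bits()
changes above). A standalone sketch of that alignment, using an invented
toy_copy_and_checksum() helper rather than the kernel API:

#include <stdio.h>
#include <string.h>

static size_t toy_copy_and_checksum(char *dst, const char *src, size_t off,
				    size_t len, size_t running_sum)
{
	size_t i, sum = running_sum;

	/* Copy the requested slice, then fold its bytes into the sum. */
	memcpy(dst, src + off, len);
	for (i = 0; i < len; i++)
		sum += (unsigned char)dst[i];
	return sum;
}

int main(void)
{
	const char payload[] = "toy tcp segment";
	char copy[sizeof(payload)];

	/* Continuation arguments aligned under the opening parenthesis,
	 * mirroring the retransmit/MTU-probe hunks above.
	 */
	size_t sum = toy_copy_and_checksum(copy, payload, 0,
					   sizeof(payload), 0);

	printf("checksum-ish: %zu\n", sum);
	return 0;
}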