tcp: add tcpi_segs_in and tcpi_segs_out to tcp_info
This patch tracks the total number of inbound and outbound segments on a TCP socket. One may compare these numbers against the retransmission counters to get an idea of connection quality. RFC 4898 names them tcpEStatsPerfSegsIn and tcpEStatsPerfSegsOut.

These are 32-bit fields and can be fetched either from the TCP_INFO getsockopt(), if one has a handle on a TCP socket, or from the inet_diag netlink facility (an iproute2/ss patch will follow).

Note that tp->segs_out was placed near tp->snd_nxt for good data locality and minimal performance impact, while tp->segs_in was placed near tp->bytes_received for the same reason.

Joint work with Eric Dumazet.

Note that received SYNs are accounted on the listener, but sent SYNACKs are not accounted.

Signed-off-by: Marcelo Ricardo Leitner <mleitner@redhat.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 48ed7b26fa
commit 2efd055c53
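For reference, below is a minimal user-space sketch (not part of this commit) of the TCP_INFO path described above. It assumes uapi headers that already carry this patch; the peer address and port are placeholders. Fetching the same counters via inet_diag requires the matching iproute2/ss patch mentioned in the changelog.

/* Hypothetical example, not part of the patch: read the new segment
 * counters from a connected TCP socket via getsockopt(TCP_INFO).
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/tcp.h>		/* struct tcp_info with tcpi_segs_in/out */

int main(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port   = htons(80),	/* placeholder peer */
	};
	struct tcp_info ti;
	socklen_t len = sizeof(ti);
	int fd;

	inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);

	fd = socket(AF_INET, SOCK_STREAM, 0);
	if (fd < 0 || connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	/* ... exchange some data on fd ... */

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
		printf("segs_in=%u segs_out=%u retrans=%u\n",
		       ti.tcpi_segs_in, ti.tcpi_segs_out,
		       ti.tcpi_total_retrans);

	close(fd);
	return 0;
}

Dividing tcpi_total_retrans by tcpi_segs_out gives a rough per-segment retransmission ratio, which is the kind of quality signal the changelog refers to.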
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -149,11 +149,16 @@ struct tcp_sock {
 				 * sum(delta(rcv_nxt)), or how many bytes
 				 * were acked.
 				 */
+	u32	segs_in;	/* RFC4898 tcpEStatsPerfSegsIn
+				 * total number of segments in.
+				 */
 	u32	rcv_nxt;	/* What we want to receive next	*/
 	u32	copied_seq;	/* Head of yet unread data	*/
 	u32	rcv_wup;	/* rcv_nxt on last window update sent	*/
 	u32	snd_nxt;	/* Next sequence we send	*/
-
+	u32	segs_out;	/* RFC4898 tcpEStatsPerfSegsOut
+				 * The total number of segments sent.
+				 */
 	u64	bytes_acked;	/* RFC4898 tcpEStatsAppHCThruOctetsAcked
 				 * sum(delta(snd_una)), or how many bytes
 				 * were acked.
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -192,8 +192,10 @@ struct tcp_info {
 
 	__u64	tcpi_pacing_rate;
 	__u64	tcpi_max_pacing_rate;
 	__u64	tcpi_bytes_acked;    /* RFC4898 tcpEStatsAppHCThruOctetsAcked */
 	__u64	tcpi_bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived */
+	__u32	tcpi_segs_out;	     /* RFC4898 tcpEStatsPerfSegsOut */
+	__u32	tcpi_segs_in;	     /* RFC4898 tcpEStatsPerfSegsIn */
 };
 
 /* for TCP_MD5SIG socket option */
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2695,6 +2695,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 	spin_lock_bh(&sk->sk_lock.slock);
 	info->tcpi_bytes_acked = tp->bytes_acked;
 	info->tcpi_bytes_received = tp->bytes_received;
+	info->tcpi_segs_out = tp->segs_out;
+	info->tcpi_segs_in = tp->segs_in;
 	spin_unlock_bh(&sk->sk_lock.slock);
 }
 EXPORT_SYMBOL_GPL(tcp_get_info);
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1626,6 +1626,7 @@ process:
 	skb->dev = NULL;
 
 	bh_lock_sock_nested(sk);
+	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
 	ret = 0;
 	if (!sock_owned_by_user(sk)) {
 		if (!tcp_prequeue(sk, skb))
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -448,6 +448,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 
 		newtp->rcv_wup = newtp->copied_seq =
 		newtp->rcv_nxt = treq->rcv_isn + 1;
+		newtp->segs_in = 0;
 
 		newtp->snd_sml = newtp->snd_una =
 		newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1027,6 +1027,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
 			      tcp_skb_pcount(skb));
 
+	tp->segs_out += tcp_skb_pcount(skb);
 	/* OK, its time to fill skb_shinfo(skb)->gso_segs */
 	skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
 
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1421,6 +1421,7 @@ process:
 	skb->dev = NULL;
 
 	bh_lock_sock_nested(sk);
+	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
 	ret = 0;
 	if (!sock_owned_by_user(sk)) {
 		if (!tcp_prequeue(sk, skb))