// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Syncookies implementation for the Linux kernel
 *
 * Copyright (C) 1997 Andi Kleen
 * Based on ideas by D.J.Bernstein and Eric Schenk.
 */

#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/siphash.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <net/secure_seq.h>
#include <net/tcp.h>
#include <net/route.h>
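
/* Two SipHash keys, initialized lazily on first use: [0] keys the outer
 * hash, [1] keys the count-dependent hash that carries the encoded MSS
 * index (see cookie_hash() and secure_tcp_syn_cookie() below).
 */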
static siphash_key_t syncookie_secret[2] __read_mostly;

#define COOKIEBITS 24 /* Upper bits store count */
#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)

/* TCP Timestamp: 6 lowest bits of timestamp sent in the cookie SYN-ACK
 * store TCP options:
 *
 * MSB                               LSB
 * | 31 ...   6 |  5  |  4   | 3 2 1 0 |
 * |  Timestamp | ECN | SACK | WScale  |
 *
 * When we receive a valid cookie-ACK, we look at the echoed tsval (if
 * any) to figure out which TCP options we should use for the rebuilt
 * connection.
 *
 * A WScale setting of '0xf' (which is an invalid scaling value)
 * means that the original SYN did not include the TCP window scaling option.
 */
#define TS_OPT_WSCALE_MASK 0xf
#define TS_OPT_SACK BIT(4)
#define TS_OPT_ECN BIT(5)
/* There is no TS_OPT_TIMESTAMP:
 * if ACK contains timestamp option, we already know it was
 * requested/supported by the syn/synack exchange.
 */
#define TSBITS 6
#define TSMASK (((__u32)1 << TSBITS) - 1)

static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
                       u32 count, int c)
{
        net_get_random_once(syncookie_secret, sizeof(syncookie_secret));
        return siphash_4u32((__force u32)saddr, (__force u32)daddr,
                            (__force u32)sport << 16 | (__force u32)dport,
                            count, &syncookie_secret[c]);
}

/*
 * when syncookies are in effect and tcp timestamps are enabled we encode
 * tcp options in the lower bits of the timestamp value that will be
 * sent in the syn-ack.
 * Since subsequent timestamps use the normal tcp_time_stamp value, we
 * must make sure that the resulting initial timestamp is <= tcp_time_stamp.
 */
u64 cookie_init_timestamp(struct request_sock *req)
{
        struct inet_request_sock *ireq;
        u32 ts, ts_now = tcp_time_stamp_raw();
        u32 options = 0;

        ireq = inet_rsk(req);

        options = ireq->wscale_ok ? ireq->snd_wscale : TS_OPT_WSCALE_MASK;
        if (ireq->sack_ok)
                options |= TS_OPT_SACK;
        if (ireq->ecn_ok)
                options |= TS_OPT_ECN;
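
        /* Replace the low TSBITS of the current timestamp with the option
         * bits; if that pushes the value past ts_now, step back one TSBITS
         * period so the initial tsval never runs ahead of tcp_time_stamp.
         */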
        ts = ts_now & ~TSMASK;
        ts |= options;
        if (ts > ts_now) {
                ts >>= TSBITS;
                ts--;
                ts <<= TSBITS;
                ts |= options;
        }
        return (u64)ts * (NSEC_PER_SEC / TCP_TS_HZ);
}

static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
                                   __be16 dport, __u32 sseq, __u32 data)
{
        /*
         * Compute the secure sequence number.
         * The output should be:
         *   HASH(sec1,saddr,sport,daddr,dport,sec1) + sseq + (count * 2^24)
         *      + (HASH(sec2,saddr,sport,daddr,dport,count,sec2) % 2^24).
         * Where sseq is their sequence number and count increases every
         * minute by 1.
         * As an extra hack, we add a small "data" value that encodes the
         * MSS into the second hash value.
         */
        u32 count = tcp_cookie_time();
        return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
                sseq + (count << COOKIEBITS) +
                ((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
                 & COOKIEMASK));
}

/*
 * This retrieves the small "data" value from the syncookie.
 * If the syncookie is bad, the data returned will be out of
 * range. This must be checked by the caller.
 *
 * The count value used to generate the cookie must be less than
 * MAX_SYNCOOKIE_AGE minutes in the past.
 * The return value is (__u32)-1 if this test fails.
 */
static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
                                  __be16 sport, __be16 dport, __u32 sseq)
{
        u32 diff, count = tcp_cookie_time();

        /* Strip away the layers from the cookie */
        cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;

        /* Cookie is now reduced to (count * 2^24) ^ (hash % 2^24) */
        diff = (count - (cookie >> COOKIEBITS)) & ((__u32) -1 >> COOKIEBITS);
        if (diff >= MAX_SYNCOOKIE_AGE)
                return (__u32)-1;

        return (cookie -
                cookie_hash(saddr, daddr, sport, dport, count - diff, 1))
                & COOKIEMASK;   /* Leaving the data behind */
}

/*
 * MSS Values are chosen based on the 2011 paper
 * 'An Analysis of TCP Maximum Segment Sizes' by S. Alcock and R. Nelson.
 * Values ..
 *  .. lower than 536 are rare (< 0.2%)
 *  .. between 537 and 1299 account for less than 1.5% of observed values
 *  .. in the 1300-1349 range account for about 15 to 20% of observed mss values
 *  .. exceeding 1460 are very rare (< 0.04%)
 *
 *  1460 is the single most frequently announced mss value (30 to 46% depending
 *  on monitor location). Table must be sorted.
 */
static __u16 const msstab[] = {
        536,
        1300,
        1440,   /* 1440, 1452: PPPoE */
        1460,
};

/*
 * Generate a syncookie. mssp points to the mss, which is returned
 * rounded down to the value encoded in the cookie.
 */
u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
                              u16 *mssp)
{
        int mssind;
        const __u16 mss = *mssp;
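
        /* Pick the largest msstab[] entry that does not exceed the peer's
         * advertised MSS; its index is what gets folded into the cookie.
         */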
        for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--)
                if (mss >= msstab[mssind])
                        break;
        *mssp = msstab[mssind];

        return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
                                     th->source, th->dest, ntohl(th->seq),
                                     mssind);
}
EXPORT_SYMBOL_GPL(__cookie_v4_init_sequence);

__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mssp)
{
        const struct iphdr *iph = ip_hdr(skb);
        const struct tcphdr *th = tcp_hdr(skb);

        return __cookie_v4_init_sequence(iph, th, mssp);
}

/*
 * Check if an ACK sequence number is a valid syncookie.
 * Return the decoded mss if it is, or 0 if not.
 */
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
                      u32 cookie)
{
        __u32 seq = ntohl(th->seq) - 1;
        __u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr,
                                            th->source, th->dest, seq);

        return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
}
EXPORT_SYMBOL_GPL(__cookie_v4_check);

struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
                                 struct request_sock *req,
                                 struct dst_entry *dst, u32 tsoff)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct sock *child;
        bool own_req;
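
        /* No request was ever queued for this ACK: build the child socket
         * directly from the request we just reconstructed from the cookie.
         */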
        child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
                                                 NULL, &own_req);
        if (child) {
                refcount_set(&req->rsk_refcnt, 1);
                tcp_sk(child)->tsoffset = tsoff;
                sock_rps_save_rxhash(child, skb);
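
                /* Requests flagged by rsk_drop_req() (e.g. MPTCP join
                 * subflows) must not sit on the accept queue: release our
                 * reference and hand the child back directly.
                 */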
                if (rsk_drop_req(req)) {
                        reqsk_put(req);
                        return child;
                }

                if (inet_csk_reqsk_queue_add(sk, req, child))
                        return child;

                bh_unlock_sock(child);
                sock_put(child);
        }
        __reqsk_free(req);

        return NULL;
}
EXPORT_SYMBOL(tcp_get_cookie_sock);

/*
 * when syncookies are in effect and tcp timestamps are enabled we stored
 * additional tcp options in the timestamp.
 * This extracts these options from the timestamp echo.
 *
 * return false if we decode a tcp option that is disabled
 * on the host.
 */
bool cookie_timestamp_decode(const struct net *net,
                             struct tcp_options_received *tcp_opt)
{
        /* echoed timestamp, lowest bits contain options */
        u32 options = tcp_opt->rcv_tsecr;

        if (!tcp_opt->saw_tstamp) {
                tcp_clear_options(tcp_opt);
                return true;
        }

        if (!READ_ONCE(net->ipv4.sysctl_tcp_timestamps))
                return false;

        tcp_opt->sack_ok = (options & TS_OPT_SACK) ? TCP_SACK_SEEN : 0;

        if (tcp_opt->sack_ok && !READ_ONCE(net->ipv4.sysctl_tcp_sack))
                return false;

        if ((options & TS_OPT_WSCALE_MASK) == TS_OPT_WSCALE_MASK)
                return true; /* no window scaling */

        tcp_opt->wscale_ok = 1;
        tcp_opt->snd_wscale = options & TS_OPT_WSCALE_MASK;

        return READ_ONCE(net->ipv4.sysctl_tcp_window_scaling) != 0;
}
EXPORT_SYMBOL(cookie_timestamp_decode);

bool cookie_ecn_ok(const struct tcp_options_received *tcp_opt,
                   const struct net *net, const struct dst_entry *dst)
{
        bool ecn_ok = tcp_opt->rcv_tsecr & TS_OPT_ECN;

        if (!ecn_ok)
                return false;

        if (net->ipv4.sysctl_tcp_ecn)
                return true;

        return dst_feature(dst, RTAX_FEATURE_ECN);
}
EXPORT_SYMBOL(cookie_ecn_ok);

struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
                                            struct sock *sk,
                                            struct sk_buff *skb)
{
        struct tcp_request_sock *treq;
        struct request_sock *req;
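
        /* For an MPTCP listener, allocate the request through the MPTCP
         * subflow ops so the (larger) subflow request sock is used.
         */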
#ifdef CONFIG_MPTCP
        if (sk_is_mptcp(sk))
                ops = &mptcp_subflow_request_sock_ops;
#endif

        req = inet_reqsk_alloc(ops, sk, false);
        if (!req)
                return NULL;

        treq = tcp_rsk(req);
#if IS_ENABLED(CONFIG_MPTCP)
        treq->is_mptcp = sk_is_mptcp(sk);
        if (treq->is_mptcp) {
                int err = mptcp_subflow_init_cookie_req(req, sk, skb);

                if (err) {
                        reqsk_free(req);
                        return NULL;
                }
        }
#endif

        return req;
}
EXPORT_SYMBOL_GPL(cookie_tcp_reqsk_alloc);

/* On input, sk is a listener.
 * Output is listener if incoming packet would not create a child
 *           NULL if memory could not be allocated.
 */
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
{
        struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
        struct tcp_options_received tcp_opt;
        struct inet_request_sock *ireq;
        struct tcp_request_sock *treq;
        struct tcp_sock *tp = tcp_sk(sk);
        const struct tcphdr *th = tcp_hdr(skb);
        __u32 cookie = ntohl(th->ack_seq) - 1;
        struct sock *ret = sk;
        struct request_sock *req;
        int full_space, mss;
        struct rtable *rt;
        __u8 rcv_wscale;
        struct flowi4 fl4;
        u32 tsoff = 0;
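
        /* Only look for a cookie when syncookies are enabled in this netns
         * and the segment is an ACK that does not carry a RST.
         */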
        if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) ||
            !th->ack || th->rst)
                goto out;

        if (tcp_synq_no_recent_overflow(sk))
                goto out;
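
        /* Decode the cookie carried in the ACK sequence number; zero means
         * it did not map back to a valid msstab[] index.
         */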
        mss = __cookie_v4_check(ip_hdr(skb), th, cookie);
        if (mss == 0) {
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
                goto out;
        }

        __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);

        /* check for timestamp cookie support */
        memset(&tcp_opt, 0, sizeof(tcp_opt));
        tcp_parse_options(sock_net(sk), skb, &tcp_opt, 0, NULL);
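
        /* Undo the per-connection timestamp offset before decoding the
         * option bits hidden in the echoed timestamp.
         */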
        if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) {
                tsoff = secure_tcp_ts_off(sock_net(sk),
                                          ip_hdr(skb)->daddr,
                                          ip_hdr(skb)->saddr);
                tcp_opt.rcv_tsecr -= tsoff;
        }

        if (!cookie_timestamp_decode(sock_net(sk), &tcp_opt))
                goto out;

        ret = NULL;
        req = cookie_tcp_reqsk_alloc(&tcp_request_sock_ops, sk, skb);
        if (!req)
                goto out;

        ireq = inet_rsk(req);
        treq = tcp_rsk(req);
        treq->af_specific = &tcp_request_sock_ipv4_ops;
        treq->rcv_isn = ntohl(th->seq) - 1;
        treq->snt_isn = cookie;
        treq->ts_off = 0;
        treq->txhash = net_tx_rndhash();
        req->mss = mss;
        ireq->ir_num = ntohs(th->dest);
        ireq->ir_rmt_port = th->source;
        sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
        sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
        ireq->ir_mark = inet_request_mark(sk, skb);
        ireq->snd_wscale = tcp_opt.snd_wscale;
        ireq->sack_ok = tcp_opt.sack_ok;
        ireq->wscale_ok = tcp_opt.wscale_ok;
        ireq->tstamp_ok = tcp_opt.saw_tstamp;
        req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
        treq->snt_synack = 0;
        treq->tfo_listener = false;
        if (IS_ENABLED(CONFIG_SMC))
                ireq->smc_ok = 0;

        ireq->ir_iif = inet_request_bound_dev_if(sk, skb);

        /* We threw the options of the initial SYN away, so we hope
         * the ACK carries the same options again (see RFC1122 4.2.3.8)
         */
        RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(sock_net(sk), skb));

        if (security_inet_conn_request(sk, skb, req)) {
                reqsk_free(req);
                goto out;
        }

        req->num_retrans = 0;

        /*
         * We need to lookup the route here to get at the correct
         * window size. We should better make sure that the window size
         * hasn't changed since we received the original syn, but I see
         * no easy way to do this.
         */
        flowi4_init_output(&fl4, ireq->ir_iif, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
                           inet_sk_flowi_flags(sk),
                           opt->srr ? opt->faddr : ireq->ir_rmt_addr,
                           ireq->ir_loc_addr, th->source, th->dest, sk->sk_uid);
        security_req_classify_flow(req, flowi4_to_flowi(&fl4));
        rt = ip_route_output_key(sock_net(sk), &fl4);
        if (IS_ERR(rt)) {
                reqsk_free(req);
                goto out;
        }

        /* Try to redo what tcp_v4_send_synack did. */
        req->rsk_window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
        /* limit the window selection if the user enforces a smaller rx buffer */
        full_space = tcp_full_space(sk);
        if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
            (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
                req->rsk_window_clamp = full_space;

        tcp_select_initial_window(sk, full_space, req->mss,
                                  &req->rsk_rcv_wnd, &req->rsk_window_clamp,
                                  ireq->wscale_ok, &rcv_wscale,
                                  dst_metric(&rt->dst, RTAX_INITRWND));

        ireq->rcv_wscale  = rcv_wscale;
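        /* ECN is used only if the cookie timestamp advertised it and either
         * the tcp_ecn sysctl or the route's RTAX_FEATURE_ECN allows it
         * (see cookie_ecn_ok()).
         */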
        ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), &rt->dst);

        ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst, tsoff);
        /* ip_queue_xmit() depends on our flow being setup
         * Normal sockets get it right from inet_csk_route_child_sock()
         */
        if (ret)
                inet_sk(ret)->cork.fl.u.ip4 = fl4;
out:    return ret;
}