net: defer call to cgroup_sk_alloc()

sk_clone_lock() might run while the TCP/DCCP listener has already vanished.

To prevent a use-after-free, it is better to defer cgroup_sk_alloc()
to the point where we know both parent and child exist, and to call it from
process context.
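
As a rough illustration of the ordering this change enforces, here is a
minimal userspace sketch (plain C, no kernel APIs; struct fake_sock and the
helper names are invented for illustration): the clone path only zeroes the
child's cgroup field, and the cgroup association is established later, on the
accept path, where both sockets are known to be alive and we run in process
context.

/* Minimal userspace sketch of the deferral pattern; not kernel code.
 * struct fake_sock and the helpers below are invented for illustration.
 */
#include <stdio.h>
#include <string.h>

struct fake_cgroup_data { int id; };

struct fake_sock {
	struct fake_cgroup_data cgrp_data;
};

/* Clone path: may run from softirq while the listener is being torn down,
 * so take no cgroup reference here; just leave the child's field zeroed.
 */
static void clone_sketch(const struct fake_sock *parent, struct fake_sock *child)
{
	*child = *parent;
	memset(&child->cgrp_data, 0, sizeof(child->cgrp_data));
}

/* Accept path: process context, both listener and child are pinned,
 * so this is where the cgroup association is safe to establish.
 */
static void accept_sketch(struct fake_sock *child)
{
	child->cgrp_data.id = 1;	/* stand-in for cgroup_sk_alloc() */
}

int main(void)
{
	struct fake_sock listener = { .cgrp_data = { .id = 7 } };
	struct fake_sock child;

	clone_sketch(&listener, &child);
	accept_sketch(&child);
	printf("child cgroup id after accept: %d\n", child.cgrp_data.id);
	return 0;
}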

Fixes: e994b2f0fb ("tcp: do not lock listener to process SYN packets")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Eric Dumazet, 2017-10-08 21:44:52 -07:00; committed by David S. Miller
parent 9f1c2674b3
commit fbb1fb4ad4
3 changed files with 6 additions and 13 deletions

kernel/cgroup/cgroup.c

@@ -5709,17 +5709,6 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
 	if (cgroup_sk_alloc_disabled)
 		return;
 
-	/* Socket clone path */
-	if (skcd->val) {
-		/*
-		 * We might be cloning a socket which is left in an empty
-		 * cgroup and the cgroup might have already been rmdir'd.
-		 * Don't use cgroup_get_live().
-		 */
-		cgroup_get(sock_cgroup_ptr(skcd));
-		return;
-	}
-
 	rcu_read_lock();
 	while (true) {

net/core/sock.c

@@ -1680,6 +1680,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		/* sk->sk_memcg will be populated at accept() time */
 		newsk->sk_memcg = NULL;
+		memset(&newsk->sk_cgrp_data, 0, sizeof(newsk->sk_cgrp_data));
 		atomic_set(&newsk->sk_drops, 0);
 		newsk->sk_send_head = NULL;
@@ -1718,8 +1719,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		newsk->sk_incoming_cpu = raw_smp_processor_id();
 		atomic64_set(&newsk->sk_cookie, 0);
 
-		cgroup_sk_alloc(&newsk->sk_cgrp_data);
-
 		/*
 		 * Before updating sk_refcnt, we must commit prior changes to memory
 		 * (Documentation/RCU/rculist_nulls.txt for details)

net/ipv4/inet_connection_sock.c

@@ -26,6 +26,8 @@
 #include <net/tcp.h>
 #include <net/sock_reuseport.h>
 #include <net/addrconf.h>
+#include <net/cls_cgroup.h>
+#include <net/netprio_cgroup.h>
 
 #ifdef INET_CSK_DEBUG
 const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
@@ -476,6 +478,9 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
 			spin_unlock_bh(&queue->fastopenq.lock);
 	}
 	mem_cgroup_sk_alloc(newsk);
+	cgroup_sk_alloc(&newsk->sk_cgrp_data);
+	sock_update_classid(&newsk->sk_cgrp_data);
+	sock_update_netprioidx(&newsk->sk_cgrp_data);
 out:
 	release_sock(sk);
 	if (req)