Merge branch 'sock-lockdep-tightening'
Hannes Frederic Sowa says:

====================
sock: lockdep tightening

The first patch is from Eric Dumazet and improves lockdep accuracy for
socket locks. The second patch introduces lockdep_sock_is_held and uses
it. The final patch reverts and reworks the lockdep fix from Daniel in
the filter code, as we now have tighter lockdep support.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 1fbbe1a8a9
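For orientation, the pattern the series converges on can be sketched as follows. This is an illustrative example only, not code from the series: struct my_opts, struct my_sock_state and my_opts_get_value() are made-up names. An RCU-protected per-socket pointer is dereferenced with rcu_dereference_protected() using lockdep_sock_is_held(sk) as the condition, which holds when the current context owns the socket mutex (taken via lock_sock()) or holds sk_lock.slock.

/*
 * Illustrative sketch only -- the types and the helper below are
 * hypothetical, not part of this series.
 */
#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <net/sock.h>

struct my_opts {
	int value;
};

struct my_sock_state {
	struct my_opts __rcu *opts;	/* updated under the socket lock */
};

static int my_opts_get_value(struct sock *sk, struct my_sock_state *state)
{
	struct my_opts *opts;
	int value = -ENOENT;

	lock_sock(sk);		/* makes lockdep_sock_is_held(sk) true */
	opts = rcu_dereference_protected(state->opts,
					 lockdep_sock_is_held(sk));
	if (opts)
		value = opts->value;
	release_sock(sk);

	return value;
}

Before this series such call sites typically passed sock_owned_by_user(sk), which only tests the sk_lock.owned flag; lockdep_sock_is_held() asks lockdep whether the current task actually holds sk_lock or sk_lock.slock, so lockdep/PROVE_RCU builds can flag callers that dereference the pointer without the socket lock held.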
@@ -622,8 +622,9 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
 
 	/* Re-attach the filter to persist device */
 	if (!skip_filter && (tun->filter_attached == true)) {
-		err = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
-					 lockdep_rtnl_is_held());
+		lock_sock(tfile->socket.sk);
+		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+		release_sock(tfile->socket.sk);
 		if (!err)
 			goto out;
 	}
@@ -1824,7 +1825,9 @@ static void tun_detach_filter(struct tun_struct *tun, int n)
 
 	for (i = 0; i < n; i++) {
 		tfile = rtnl_dereference(tun->tfiles[i]);
-		__sk_detach_filter(tfile->socket.sk, lockdep_rtnl_is_held());
+		lock_sock(tfile->socket.sk);
+		sk_detach_filter(tfile->socket.sk);
+		release_sock(tfile->socket.sk);
 	}
 
 	tun->filter_attached = false;
@@ -1837,8 +1840,9 @@ static int tun_attach_filter(struct tun_struct *tun)
 
 	for (i = 0; i < tun->numqueues; i++) {
 		tfile = rtnl_dereference(tun->tfiles[i]);
-		ret = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
-					 lockdep_rtnl_is_held());
+		lock_sock(tfile->socket.sk);
+		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+		release_sock(tfile->socket.sk);
 		if (ret) {
 			tun_detach_filter(tun, i);
 			return ret;
@@ -465,14 +465,10 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
 void bpf_prog_destroy(struct bpf_prog *fp);
 
 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
-int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
-		       bool locked);
 int sk_attach_bpf(u32 ufd, struct sock *sk);
 int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
 int sk_detach_filter(struct sock *sk);
-int __sk_detach_filter(struct sock *sk, bool locked);
-
 int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
 		  unsigned int len);
 
@@ -1333,7 +1333,12 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
 
 static inline void sock_release_ownership(struct sock *sk)
 {
-	sk->sk_lock.owned = 0;
+	if (sk->sk_lock.owned) {
+		sk->sk_lock.owned = 0;
+
+		/* The sk_lock has mutex_unlock() semantics: */
+		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+	}
 }
 
 /*
@@ -1355,6 +1360,14 @@ do { \
 	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
 } while (0)
 
+static bool lockdep_sock_is_held(const struct sock *csk)
+{
+	struct sock *sk = (struct sock *)csk;
+
+	return lockdep_is_held(&sk->sk_lock) ||
+	       lockdep_is_held(&sk->sk_lock.slock);
+}
+
 void lock_sock_nested(struct sock *sk, int subclass);
 
 static inline void lock_sock(struct sock *sk)
@@ -1593,8 +1606,8 @@ static inline void sk_rethink_txhash(struct sock *sk)
 static inline struct dst_entry *
 __sk_dst_get(struct sock *sk)
 {
-	return rcu_dereference_check(sk->sk_dst_cache, sock_owned_by_user(sk) ||
-				     lockdep_is_held(&sk->sk_lock.slock));
+	return rcu_dereference_check(sk->sk_dst_cache,
+				     lockdep_sock_is_held(sk));
 }
 
 static inline struct dst_entry *
@@ -1149,8 +1149,7 @@ void bpf_prog_destroy(struct bpf_prog *fp)
 }
 EXPORT_SYMBOL_GPL(bpf_prog_destroy);
 
-static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk,
-			    bool locked)
+static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
 {
 	struct sk_filter *fp, *old_fp;
 
@@ -1166,8 +1165,10 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk,
 		return -ENOMEM;
 	}
 
-	old_fp = rcu_dereference_protected(sk->sk_filter, locked);
+	old_fp = rcu_dereference_protected(sk->sk_filter,
+					   lockdep_sock_is_held(sk));
 	rcu_assign_pointer(sk->sk_filter, fp);
+
 	if (old_fp)
 		sk_filter_uncharge(sk, old_fp);
 
@@ -1246,8 +1247,7 @@ struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
 *	occurs or there is insufficient memory for the filter a negative
 *	errno code is returned. On success the return is zero.
 */
-int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
-		       bool locked)
+int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 {
 	struct bpf_prog *prog = __get_filter(fprog, sk);
 	int err;
@@ -1255,7 +1255,7 @@ int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
 	if (IS_ERR(prog))
 		return PTR_ERR(prog);
 
-	err = __sk_attach_prog(prog, sk, locked);
+	err = __sk_attach_prog(prog, sk);
 	if (err < 0) {
 		__bpf_prog_release(prog);
 		return err;
@@ -1263,12 +1263,7 @@ int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(__sk_attach_filter);
-
-int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
-{
-	return __sk_attach_filter(fprog, sk, sock_owned_by_user(sk));
-}
 EXPORT_SYMBOL_GPL(sk_attach_filter);
 
 int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 {
@@ -1314,7 +1309,7 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
 	if (IS_ERR(prog))
 		return PTR_ERR(prog);
 
-	err = __sk_attach_prog(prog, sk, sock_owned_by_user(sk));
+	err = __sk_attach_prog(prog, sk);
 	if (err < 0) {
 		bpf_prog_put(prog);
 		return err;
@@ -2255,7 +2250,7 @@ static int __init register_sk_filter_ops(void)
 }
 late_initcall(register_sk_filter_ops);
 
-int __sk_detach_filter(struct sock *sk, bool locked)
+int sk_detach_filter(struct sock *sk)
 {
 	int ret = -ENOENT;
 	struct sk_filter *filter;
@@ -2263,7 +2258,8 @@ int __sk_detach_filter(struct sock *sk, bool locked)
 	if (sock_flag(sk, SOCK_FILTER_LOCKED))
 		return -EPERM;
 
-	filter = rcu_dereference_protected(sk->sk_filter, locked);
+	filter = rcu_dereference_protected(sk->sk_filter,
+					   lockdep_sock_is_held(sk));
 	if (filter) {
 		RCU_INIT_POINTER(sk->sk_filter, NULL);
 		sk_filter_uncharge(sk, filter);
@@ -2272,12 +2268,7 @@ int __sk_detach_filter(struct sock *sk, bool locked)
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(__sk_detach_filter);
-
-int sk_detach_filter(struct sock *sk)
-{
-	return __sk_detach_filter(sk, sock_owned_by_user(sk));
-}
 EXPORT_SYMBOL_GPL(sk_detach_filter);
 
 int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
 		  unsigned int len)
@@ -2288,7 +2279,7 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
 
 	lock_sock(sk);
 	filter = rcu_dereference_protected(sk->sk_filter,
-					   sock_owned_by_user(sk));
+					   lockdep_sock_is_held(sk));
 	if (!filter)
 		goto out;
 
@@ -2483,11 +2483,6 @@ EXPORT_SYMBOL(lock_sock_nested);
 
 void release_sock(struct sock *sk)
 {
-	/*
-	 * The sk_lock has mutex_unlock() semantics:
-	 */
-	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-
 	spin_lock_bh(&sk->sk_lock.slock);
 	if (sk->sk_backlog.tail)
 		__release_sock(sk);
@@ -62,7 +62,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	nexthop = daddr = usin->sin_addr.s_addr;
 
 	inet_opt = rcu_dereference_protected(inet->inet_opt,
-					     sock_owned_by_user(sk));
+					     lockdep_sock_is_held(sk));
 	if (inet_opt != NULL && inet_opt->opt.srr) {
 		if (daddr == 0)
 			return -EINVAL;
@@ -868,7 +868,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	fl6.fl6_sport = inet->inet_sport;
 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
-	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
 	final_p = fl6_update_dst(&fl6, opt, &final);
 
 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
@@ -1107,7 +1107,7 @@ static int inet_sk_reselect_saddr(struct sock *sk)
 	struct ip_options_rcu *inet_opt;
 
 	inet_opt = rcu_dereference_protected(inet->inet_opt,
-					     sock_owned_by_user(sk));
+					     lockdep_sock_is_held(sk));
 	if (inet_opt && inet_opt->opt.srr)
 		daddr = inet_opt->opt.faddr;
 
@@ -1933,7 +1933,8 @@ int cipso_v4_sock_setattr(struct sock *sk,
 
 	sk_inet = inet_sk(sk);
 
-	old = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk));
+	old = rcu_dereference_protected(sk_inet->inet_opt,
+					lockdep_sock_is_held(sk));
 	if (sk_inet->is_icsk) {
 		sk_conn = inet_csk(sk);
 		if (old)
@@ -642,7 +642,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
 		if (err)
 			break;
 		old = rcu_dereference_protected(inet->inet_opt,
-						sock_owned_by_user(sk));
+						lockdep_sock_is_held(sk));
 		if (inet->is_icsk) {
 			struct inet_connection_sock *icsk = inet_csk(sk);
 #if IS_ENABLED(CONFIG_IPV6)
@@ -1302,7 +1302,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
 		struct ip_options_rcu *inet_opt;
 
 		inet_opt = rcu_dereference_protected(inet->inet_opt,
-						     sock_owned_by_user(sk));
+						     lockdep_sock_is_held(sk));
 		opt->optlen = 0;
 		if (inet_opt)
 			memcpy(optbuf, &inet_opt->opt,
@@ -157,7 +157,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 
 	nexthop = daddr = usin->sin_addr.s_addr;
 	inet_opt = rcu_dereference_protected(inet->inet_opt,
-					     sock_owned_by_user(sk));
+					     lockdep_sock_is_held(sk));
 	if (inet_opt && inet_opt->opt.srr) {
 		if (!daddr)
 			return -EINVAL;
@@ -882,8 +882,7 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
 
 	/* caller either holds rcu_read_lock() or socket lock */
 	md5sig = rcu_dereference_check(tp->md5sig_info,
-				       sock_owned_by_user(sk) ||
-				       lockdep_is_held((spinlock_t *)&sk->sk_lock.slock));
+				       lockdep_sock_is_held(sk));
 	if (!md5sig)
 		return NULL;
 #if IS_ENABLED(CONFIG_IPV6)
@@ -928,8 +927,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
 	}
 
 	md5sig = rcu_dereference_protected(tp->md5sig_info,
-					   sock_owned_by_user(sk) ||
-					   lockdep_is_held(&sk->sk_lock.slock));
+					   lockdep_sock_is_held(sk));
 	if (!md5sig) {
 		md5sig = kmalloc(sizeof(*md5sig), gfp);
 		if (!md5sig)
@@ -407,7 +407,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 		if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
 			break;
 
-		opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+		opt = rcu_dereference_protected(np->opt,
+						lockdep_sock_is_held(sk));
 		opt = ipv6_renew_options(sk, opt, optname,
 					 (struct ipv6_opt_hdr __user *)optval,
 					 optlen);
@@ -1124,7 +1125,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
 		struct ipv6_txoptions *opt;
 
 		lock_sock(sk);
-		opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+		opt = rcu_dereference_protected(np->opt,
+						lockdep_sock_is_held(sk));
 		len = ipv6_getsockopt_sticky(sk, opt, optname, optval, len);
 		release_sock(sk);
 		/* check if ipv6_getsockopt_sticky() returns err code */
@@ -234,7 +234,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	fl6.fl6_dport = usin->sin6_port;
 	fl6.fl6_sport = inet->inet_sport;
 
-	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
 	final_p = fl6_update_dst(&fl6, opt, &final);
 
 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
@@ -1046,7 +1046,7 @@ static int sock_fasync(int fd, struct file *filp, int on)
 		return -EINVAL;
 
 	lock_sock(sk);
-	wq = rcu_dereference_protected(sock->wq, sock_owned_by_user(sk));
+	wq = rcu_dereference_protected(sock->wq, lockdep_sock_is_held(sk));
 	fasync_helper(fd, filp, on, &wq->fasync_list);
 
 	if (!wq->fasync_list)