Merge branch 'net-enhancements-to-sk_user_data-field'
Hawkins Jiawei says: ==================== net: enhancements to sk_user_data field This patchset fixes a refcount bug by adding the SK_USER_DATA_PSOCK flag bit in the sk_user_data field. The bug causes the following warning: WARNING: CPU: 1 PID: 3605 at lib/refcount.c:19 refcount_warn_saturate+0xf4/0x1e0 lib/refcount.c:19 Modules linked in: CPU: 1 PID: 3605 Comm: syz-executor208 Not tainted 5.18.0-syzkaller-03023-g7e062cda7d90 #0 <TASK> __refcount_add_not_zero include/linux/refcount.h:163 [inline] __refcount_inc_not_zero include/linux/refcount.h:227 [inline] refcount_inc_not_zero include/linux/refcount.h:245 [inline] sk_psock_get+0x3bc/0x410 include/linux/skmsg.h:439 tls_data_ready+0x6d/0x1b0 net/tls/tls_sw.c:2091 tcp_data_ready+0x106/0x520 net/ipv4/tcp_input.c:4983 tcp_data_queue+0x25f2/0x4c90 net/ipv4/tcp_input.c:5057 tcp_rcv_state_process+0x1774/0x4e80 net/ipv4/tcp_input.c:6659 tcp_v4_do_rcv+0x339/0x980 net/ipv4/tcp_ipv4.c:1682 sk_backlog_rcv include/net/sock.h:1061 [inline] __release_sock+0x134/0x3b0 net/core/sock.c:2849 release_sock+0x54/0x1b0 net/core/sock.c:3404 inet_shutdown+0x1e0/0x430 net/ipv4/af_inet.c:909 __sys_shutdown_sock net/socket.c:2331 [inline] __sys_shutdown_sock net/socket.c:2325 [inline] __sys_shutdown+0xf1/0x1b0 net/socket.c:2343 __do_sys_shutdown net/socket.c:2351 [inline] __se_sys_shutdown net/socket.c:2349 [inline] __x64_sys_shutdown+0x50/0x70 net/socket.c:2349 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x46/0xb0 </TASK> To improve code maintainability, this patchset refactors the sk_user_data flags code to be more generic. ==================== Link: https://lore.kernel.org/r/cover.1659676823.git.yin31149@gmail.com Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
commit
dd48f3832d
|
@ -278,7 +278,8 @@ static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
|
||||||
|
|
||||||
static inline struct sk_psock *sk_psock(const struct sock *sk)
|
static inline struct sk_psock *sk_psock(const struct sock *sk)
|
||||||
{
|
{
|
||||||
return rcu_dereference_sk_user_data(sk);
|
return __rcu_dereference_sk_user_data_with_flags(sk,
|
||||||
|
SK_USER_DATA_PSOCK);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void sk_psock_set_state(struct sk_psock *psock,
|
static inline void sk_psock_set_state(struct sk_psock *psock,
|
||||||
|
|
|
@ -545,14 +545,26 @@ enum sk_pacing {
|
||||||
SK_PACING_FQ = 2,
|
SK_PACING_FQ = 2,
|
||||||
};
|
};
|
||||||
|
|
||||||
/* Pointer stored in sk_user_data might not be suitable for copying
|
/* flag bits in sk_user_data
|
||||||
* when cloning the socket. For instance, it can point to a reference
|
*
|
||||||
* counted object. sk_user_data bottom bit is set if pointer must not
|
* - SK_USER_DATA_NOCOPY: Pointer stored in sk_user_data might
|
||||||
* be copied.
|
* not be suitable for copying when cloning the socket. For instance,
|
||||||
|
* it can point to a reference counted object. sk_user_data bottom
|
||||||
|
* bit is set if pointer must not be copied.
|
||||||
|
*
|
||||||
|
* - SK_USER_DATA_BPF: Mark whether sk_user_data field is
|
||||||
|
* managed/owned by a BPF reuseport array. This bit should be set
|
||||||
|
* when sk_user_data's sk is added to the bpf's reuseport_array.
|
||||||
|
*
|
||||||
|
* - SK_USER_DATA_PSOCK: Mark whether pointer stored in
|
||||||
|
* sk_user_data points to psock type. This bit should be set
|
||||||
|
* when sk_user_data is assigned to a psock object.
|
||||||
*/
|
*/
|
||||||
#define SK_USER_DATA_NOCOPY 1UL
|
#define SK_USER_DATA_NOCOPY 1UL
|
||||||
#define SK_USER_DATA_BPF 2UL /* Managed by BPF */
|
#define SK_USER_DATA_BPF 2UL
|
||||||
#define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF)
|
#define SK_USER_DATA_PSOCK 4UL
|
||||||
|
#define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF |\
|
||||||
|
SK_USER_DATA_PSOCK)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied
|
* sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied
|
||||||
|
@ -565,24 +577,40 @@ static inline bool sk_user_data_is_nocopy(const struct sock *sk)
|
||||||
|
|
||||||
#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
|
#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
|
||||||
|
|
||||||
|
/**
|
||||||
|
* __rcu_dereference_sk_user_data_with_flags - return the pointer
|
||||||
|
* only if argument flags all has been set in sk_user_data. Otherwise
|
||||||
|
* return NULL
|
||||||
|
*
|
||||||
|
* @sk: socket
|
||||||
|
* @flags: flag bits
|
||||||
|
*/
|
||||||
|
static inline void *
|
||||||
|
__rcu_dereference_sk_user_data_with_flags(const struct sock *sk,
|
||||||
|
uintptr_t flags)
|
||||||
|
{
|
||||||
|
uintptr_t sk_user_data = (uintptr_t)rcu_dereference(__sk_user_data(sk));
|
||||||
|
|
||||||
|
WARN_ON_ONCE(flags & SK_USER_DATA_PTRMASK);
|
||||||
|
|
||||||
|
if ((sk_user_data & flags) == flags)
|
||||||
|
return (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
#define rcu_dereference_sk_user_data(sk) \
|
#define rcu_dereference_sk_user_data(sk) \
|
||||||
|
__rcu_dereference_sk_user_data_with_flags(sk, 0)
|
||||||
|
#define __rcu_assign_sk_user_data_with_flags(sk, ptr, flags) \
|
||||||
({ \
|
({ \
|
||||||
void *__tmp = rcu_dereference(__sk_user_data((sk))); \
|
uintptr_t __tmp1 = (uintptr_t)(ptr), \
|
||||||
(void *)((uintptr_t)__tmp & SK_USER_DATA_PTRMASK); \
|
__tmp2 = (uintptr_t)(flags); \
|
||||||
|
WARN_ON_ONCE(__tmp1 & ~SK_USER_DATA_PTRMASK); \
|
||||||
|
WARN_ON_ONCE(__tmp2 & SK_USER_DATA_PTRMASK); \
|
||||||
|
rcu_assign_pointer(__sk_user_data((sk)), \
|
||||||
|
__tmp1 | __tmp2); \
|
||||||
})
|
})
|
||||||
#define rcu_assign_sk_user_data(sk, ptr) \
|
#define rcu_assign_sk_user_data(sk, ptr) \
|
||||||
({ \
|
__rcu_assign_sk_user_data_with_flags(sk, ptr, 0)
|
||||||
uintptr_t __tmp = (uintptr_t)(ptr); \
|
|
||||||
WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK); \
|
|
||||||
rcu_assign_pointer(__sk_user_data((sk)), __tmp); \
|
|
||||||
})
|
|
||||||
#define rcu_assign_sk_user_data_nocopy(sk, ptr) \
|
|
||||||
({ \
|
|
||||||
uintptr_t __tmp = (uintptr_t)(ptr); \
|
|
||||||
WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK); \
|
|
||||||
rcu_assign_pointer(__sk_user_data((sk)), \
|
|
||||||
__tmp | SK_USER_DATA_NOCOPY); \
|
|
||||||
})
|
|
||||||
|
|
||||||
static inline
|
static inline
|
||||||
struct net *sock_net(const struct sock *sk)
|
struct net *sock_net(const struct sock *sk)
|
||||||
|
|
|
@ -21,14 +21,11 @@ static struct reuseport_array *reuseport_array(struct bpf_map *map)
|
||||||
/* The caller must hold the reuseport_lock */
|
/* The caller must hold the reuseport_lock */
|
||||||
void bpf_sk_reuseport_detach(struct sock *sk)
|
void bpf_sk_reuseport_detach(struct sock *sk)
|
||||||
{
|
{
|
||||||
uintptr_t sk_user_data;
|
struct sock __rcu **socks;
|
||||||
|
|
||||||
write_lock_bh(&sk->sk_callback_lock);
|
write_lock_bh(&sk->sk_callback_lock);
|
||||||
sk_user_data = (uintptr_t)sk->sk_user_data;
|
socks = __rcu_dereference_sk_user_data_with_flags(sk, SK_USER_DATA_BPF);
|
||||||
if (sk_user_data & SK_USER_DATA_BPF) {
|
if (socks) {
|
||||||
struct sock __rcu **socks;
|
|
||||||
|
|
||||||
socks = (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
|
|
||||||
WRITE_ONCE(sk->sk_user_data, NULL);
|
WRITE_ONCE(sk->sk_user_data, NULL);
|
||||||
/*
|
/*
|
||||||
* Do not move this NULL assignment outside of
|
* Do not move this NULL assignment outside of
|
||||||
|
|
|
@ -739,7 +739,9 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
|
||||||
sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
|
sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
|
||||||
refcount_set(&psock->refcnt, 1);
|
refcount_set(&psock->refcnt, 1);
|
||||||
|
|
||||||
rcu_assign_sk_user_data_nocopy(sk, psock);
|
__rcu_assign_sk_user_data_with_flags(sk, psock,
|
||||||
|
SK_USER_DATA_NOCOPY |
|
||||||
|
SK_USER_DATA_PSOCK);
|
||||||
sock_hold(sk);
|
sock_hold(sk);
|
||||||
|
|
||||||
out:
|
out:
|
||||||
|
|
Loading…
Reference in New Issue