Merge branch 'mptcp-fixes-for-6-3'

Matthieu Baerts says:

====================
mptcp: fixes for 6.3

Patch 1 fixes a possible deadlock in subflow_error_report() reported by
lockdep. The report was in fact a false positive, but the modification
makes sense and silences lockdep so that syzkaller can keep finding real
issues. The regression was introduced in v5.12.

Patch 2 is a refactoring needed to fix the next two issues. It improves
the situation and can be backported up to v6.0.

Patches 3 and 4 fix use-after-free (UaF) issues reported by KASAN. They
fix issues potentially present since v5.7 and v5.19 but only reproducible
since more recent changes (v6.0). These two patches depend on patch 2.

Patch 5 fixes the order of the values printed by a selftest on failure:
expected vs seen. The regression was introduced recently, in v6.3-rc1.

Patch 6 adds missing __ro_after_init annotations. A previous patch added
them to similar structures, but these two were missed. That previous patch
has been backported to stable versions (up to v5.12), so it is probably
better to do the same here.
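
For context, here is a minimal, generic sketch (not taken from the
series; the init function is hypothetical) of what the annotation means:
__ro_after_init places a variable in a section that the kernel remaps
read-only once boot-time initialization has finished, so data written
only during init, like the proto overrides below, cannot be changed
afterwards:

    #include <linux/cache.h>   /* __ro_after_init */
    #include <linux/init.h>    /* __init */
    #include <net/tcp.h>       /* struct proto, tcp_prot */

    static struct proto tcp_prot_override __ro_after_init;

    static int __init example_init(void)
    {
            /* allowed: boot-time init has not finished yet */
            tcp_prot_override = tcp_prot;
            return 0;
    }
    /* after init the section is sealed read-only; a later write would fault */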

Patch 7 fixes tcp_set_state() being called twice in a row, an issue
present since v5.10.

Patch 8 fixes another lockdep false positive, this time in the MPTCP PM
code. Here as well, some modifications have been made to the code to
silence this warning and help find real issues later. This issue can be
seen since v6.2.
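
As an illustration (a condensed restatement of the net/mptcp/pm_netlink.c
hunk below, with a made-up wrapper name), the fix gives the in-kernel
listening msk socket its own lockdep classes, so its lock is no longer
grouped in the same class as the subflow locks taken while it is held:

    #include <net/sock.h>      /* sock_lock_init_class_and_name() */

    /* one lock class per address family, as in the hunk below */
    static struct lock_class_key mptcp_slock_keys[2];
    static struct lock_class_key mptcp_keys[2];

    /* hypothetical wrapper around the code added to
     * mptcp_pm_nl_create_listen_socket()
     */
    static void mptcp_reclassify_msk_lock(struct sock *newsk, bool is_ipv6)
    {
            /* re-init the lock class of this kernel msk socket so lockdep
             * stops reporting a false-positive nesting issue against the
             * subflow socket locks
             */
            sock_lock_init_class_and_name(newsk,
                                          is_ipv6 ? "mlock-AF_INET6" : "mlock-AF_INET",
                                          &mptcp_slock_keys[is_ipv6],
                                          is_ipv6 ? "msk_lock-AF_INET6" : "msk_lock-AF_INET",
                                          &mptcp_keys[is_ipv6]);
    }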

v1: https://lore.kernel.org/r/20230227-upstream-net-20230227-mptcp-fixes-v1-0-070e30ae4a8e@tessares.net
====================

Link: https://lore.kernel.org/r/20230227-upstream-net-20230227-mptcp-fixes-v2-0-47c2e95eada9@tessares.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Commit dee85ac02c by Jakub Kicinski, 2023-03-10 21:42:58 -08:00
5 changed files with 95 additions and 121 deletions

net/mptcp/pm_netlink.c

@@ -997,9 +997,13 @@ out:
return ret;
}
static struct lock_class_key mptcp_slock_keys[2];
static struct lock_class_key mptcp_keys[2];
static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
struct mptcp_pm_addr_entry *entry)
{
bool is_ipv6 = sk->sk_family == AF_INET6;
int addrlen = sizeof(struct sockaddr_in);
struct sockaddr_storage addr;
struct socket *ssock;
@@ -1016,6 +1020,18 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
if (!newsk)
return -EINVAL;
/* The subflow socket lock is acquired in a nested to the msk one
* in several places, even by the TCP stack, and this msk is a kernel
* socket: lockdep complains. Instead of propagating the _nested
* modifiers in several places, re-init the lock class for the msk
* socket to an mptcp specific one.
*/
sock_lock_init_class_and_name(newsk,
is_ipv6 ? "mlock-AF_INET6" : "mlock-AF_INET",
&mptcp_slock_keys[is_ipv6],
is_ipv6 ? "msk_lock-AF_INET6" : "msk_lock-AF_INET",
&mptcp_keys[is_ipv6]);
lock_sock(newsk);
ssock = __mptcp_nmpc_socket(mptcp_sk(newsk));
release_sock(newsk);

net/mptcp/protocol.c

@@ -825,7 +825,6 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
if (sk->sk_socket && !ssk->sk_socket)
mptcp_sock_graft(ssk, sk->sk_socket);
mptcp_propagate_sndbuf((struct sock *)msk, ssk);
mptcp_sockopt_sync_locked(msk, ssk);
return true;
}
@@ -2343,7 +2342,6 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
goto out;
}
sock_orphan(ssk);
subflow->disposable = 1;
/* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops
@@ -2351,15 +2349,25 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
* reference owned by msk;
*/
if (!inet_csk(ssk)->icsk_ulp_ops) {
WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD));
kfree_rcu(subflow, rcu);
} else if (msk->in_accept_queue && msk->first == ssk) {
/* if the first subflow moved to a close state, e.g. due to
* incoming reset and we reach here before inet_child_forget()
* the TCP stack could later try to close it via
* inet_csk_listen_stop(), or deliver it to the user space via
* accept().
* We can't delete the subflow - or risk a double free - nor let
* the msk survive - or will be leaked in the non accept scenario:
* fallback and let TCP cope with the subflow cleanup.
*/
WARN_ON_ONCE(sock_flag(ssk, SOCK_DEAD));
mptcp_subflow_drop_ctx(ssk);
} else {
/* otherwise tcp will dispose of the ssk and subflow ctx */
if (ssk->sk_state == TCP_LISTEN) {
tcp_set_state(ssk, TCP_CLOSE);
mptcp_subflow_queue_clean(sk, ssk);
inet_csk_listen_stop(ssk);
if (ssk->sk_state == TCP_LISTEN)
mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED);
}
__tcp_close(ssk, 0);
/* close acquired an extra ref */
@@ -2399,9 +2407,10 @@ static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
return 0;
}
static void __mptcp_close_subflow(struct mptcp_sock *msk)
static void __mptcp_close_subflow(struct sock *sk)
{
struct mptcp_subflow_context *subflow, *tmp;
struct mptcp_sock *msk = mptcp_sk(sk);
might_sleep();
@@ -2415,7 +2424,15 @@ static void __mptcp_close_subflow(struct mptcp_sock *msk)
if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
continue;
mptcp_close_ssk((struct sock *)msk, ssk, subflow);
mptcp_close_ssk(sk, ssk, subflow);
}
/* if the MPC subflow has been closed before the msk is accepted,
* msk will never be accept-ed, close it now
*/
if (!msk->first && msk->in_accept_queue) {
sock_set_flag(sk, SOCK_DEAD);
inet_sk_state_store(sk, TCP_CLOSE);
}
}
@@ -2624,6 +2641,9 @@ static void mptcp_worker(struct work_struct *work)
__mptcp_check_send_data_fin(sk);
mptcp_check_data_fin(sk);
if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
__mptcp_close_subflow(sk);
/* There is no point in keeping around an orphaned sk timedout or
* closed, but we need the msk around to reply to incoming DATA_FIN,
* even if it is orphaned and in FIN_WAIT2 state
@@ -2639,9 +2659,6 @@ static void mptcp_worker(struct work_struct *work)
}
}
if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
__mptcp_close_subflow(msk);
if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
__mptcp_retrans(sk);
@@ -3079,6 +3096,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
msk->local_key = subflow_req->local_key;
msk->token = subflow_req->token;
msk->subflow = NULL;
msk->in_accept_queue = 1;
WRITE_ONCE(msk->fully_established, false);
if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
WRITE_ONCE(msk->csum_enabled, true);
@@ -3096,8 +3114,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
security_inet_csk_clone(nsk, req);
bh_unlock_sock(nsk);
/* keep a single reference */
__sock_put(nsk);
/* note: the newly allocated socket refcount is 2 now */
return nsk;
}
@@ -3153,8 +3170,6 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
goto out;
}
/* acquire the 2nd reference for the owning socket */
sock_hold(new_mptcp_sock);
newsk = new_mptcp_sock;
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
} else {
@@ -3705,25 +3720,10 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
struct sock *newsk = newsock->sk;
set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags);
msk->in_accept_queue = 0;
lock_sock(newsk);
/* PM/worker can now acquire the first subflow socket
* lock without racing with listener queue cleanup,
* we can notify it, if needed.
*
* Even if remote has reset the initial subflow by now
* the refcnt is still at least one.
*/
subflow = mptcp_subflow_ctx(msk->first);
list_add(&subflow->node, &msk->conn_list);
sock_hold(msk->first);
if (mptcp_is_fully_established(newsk))
mptcp_pm_fully_established(msk, msk->first, GFP_KERNEL);
mptcp_rcv_space_init(msk, msk->first);
mptcp_propagate_sndbuf(newsk, msk->first);
/* set ssk->sk_socket of accept()ed flows to mptcp socket.
* This is needed so NOSPACE flag can be set from tcp stack.
*/

net/mptcp/protocol.h

@@ -295,7 +295,8 @@ struct mptcp_sock {
u8 recvmsg_inq:1,
cork:1,
nodelay:1,
fastopening:1;
fastopening:1,
in_accept_queue:1;
int connect_flags;
struct work_struct work;
struct sk_buff *ooo_last_skb;
@@ -628,7 +629,6 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
struct mptcp_subflow_context *subflow);
void __mptcp_subflow_send_ack(struct sock *ssk);
void mptcp_subflow_reset(struct sock *ssk);
void mptcp_subflow_queue_clean(struct sock *sk, struct sock *ssk);
void mptcp_sock_graft(struct sock *sk, struct socket *parent);
struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
bool __mptcp_close(struct sock *sk, long timeout);
@@ -666,6 +666,8 @@ void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow);
bool mptcp_subflow_active(struct mptcp_subflow_context *subflow);
void mptcp_subflow_drop_ctx(struct sock *ssk);
static inline void mptcp_subflow_tcp_fallback(struct sock *sk,
struct mptcp_subflow_context *ctx)
{

net/mptcp/subflow.c

@@ -397,10 +397,15 @@ void mptcp_subflow_reset(struct sock *ssk)
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct sock *sk = subflow->conn;
/* mptcp_mp_fail_no_response() can reach here on an already closed
* socket
*/
if (ssk->sk_state == TCP_CLOSE)
return;
/* must hold: tcp_done() could drop last reference on parent */
sock_hold(sk);
tcp_set_state(ssk, TCP_CLOSE);
tcp_send_active_reset(ssk, GFP_ATOMIC);
tcp_done(ssk);
if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
@@ -622,7 +627,7 @@ static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
static struct proto tcpv6_prot_override;
static struct proto tcpv6_prot_override __ro_after_init;
static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
@@ -693,9 +698,10 @@ static bool subflow_hmac_valid(const struct request_sock *req,
static void mptcp_force_close(struct sock *sk)
{
/* the msk is not yet exposed to user-space */
/* the msk is not yet exposed to user-space, and refcount is 2 */
inet_sk_state_store(sk, TCP_CLOSE);
sk_common_release(sk);
sock_put(sk);
}
static void subflow_ulp_fallback(struct sock *sk,
@@ -711,7 +717,7 @@ static void subflow_ulp_fallback(struct sock *sk,
mptcp_subflow_ops_undo_override(sk);
}
static void subflow_drop_ctx(struct sock *ssk)
void mptcp_subflow_drop_ctx(struct sock *ssk)
{
struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
@@ -750,6 +756,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
struct mptcp_options_received mp_opt;
bool fallback, fallback_is_fatal;
struct sock *new_msk = NULL;
struct mptcp_sock *owner;
struct sock *child;
pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
@@ -816,7 +823,7 @@ create_child:
if (new_msk)
mptcp_copy_inaddrs(new_msk, child);
subflow_drop_ctx(child);
mptcp_subflow_drop_ctx(child);
goto out;
}
@@ -824,6 +831,8 @@ create_child:
ctx->setsockopt_seq = listener->setsockopt_seq;
if (ctx->mp_capable) {
owner = mptcp_sk(new_msk);
/* this can't race with mptcp_close(), as the msk is
* not yet exposted to user-space
*/
@@ -832,14 +841,14 @@ create_child:
/* record the newly created socket as the first msk
* subflow, but don't link it yet into conn_list
*/
WRITE_ONCE(mptcp_sk(new_msk)->first, child);
WRITE_ONCE(owner->first, child);
/* new mpc subflow takes ownership of the newly
* created mptcp socket
*/
mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq;
mptcp_pm_new_connection(mptcp_sk(new_msk), child, 1);
mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
mptcp_pm_new_connection(owner, child, 1);
mptcp_token_accept(subflow_req, owner);
ctx->conn = new_msk;
new_msk = NULL;
@@ -847,15 +856,21 @@ create_child:
* uses the correct data
*/
mptcp_copy_inaddrs(ctx->conn, child);
mptcp_propagate_sndbuf(ctx->conn, child);
mptcp_rcv_space_init(owner, child);
list_add(&ctx->node, &owner->conn_list);
sock_hold(child);
/* with OoO packets we can reach here without ingress
* mpc option
*/
if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK)
if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) {
mptcp_subflow_fully_established(ctx, &mp_opt);
mptcp_pm_fully_established(owner, child, GFP_ATOMIC);
ctx->pm_notified = 1;
}
} else if (ctx->mp_join) {
struct mptcp_sock *owner;
owner = subflow_req->msk;
if (!owner) {
subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
@@ -899,7 +914,7 @@ out:
return child;
dispose_child:
subflow_drop_ctx(child);
mptcp_subflow_drop_ctx(child);
tcp_rsk(req)->drop_req = true;
inet_csk_prepare_for_destroy_sock(child);
tcp_done(child);
@@ -910,7 +925,7 @@ dispose_child:
}
static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;
static struct proto tcp_prot_override;
static struct proto tcp_prot_override __ro_after_init;
enum mapping_status {
MAPPING_OK,
@@ -1432,6 +1447,13 @@ static void subflow_error_report(struct sock *ssk)
{
struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
/* bail early if this is a no-op, so that we avoid introducing a
* problematic lockdep dependency between TCP accept queue lock
* and msk socket spinlock
*/
if (!sk->sk_socket)
return;
mptcp_data_lock(sk);
if (!sock_owned_by_user(sk))
__mptcp_error_report(sk);
@@ -1803,79 +1825,6 @@ static void subflow_state_change(struct sock *sk)
}
}
void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
{
struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
struct mptcp_sock *msk, *next, *head = NULL;
struct request_sock *req;
/* build a list of all unaccepted mptcp sockets */
spin_lock_bh(&queue->rskq_lock);
for (req = queue->rskq_accept_head; req; req = req->dl_next) {
struct mptcp_subflow_context *subflow;
struct sock *ssk = req->sk;
struct mptcp_sock *msk;
if (!sk_is_mptcp(ssk))
continue;
subflow = mptcp_subflow_ctx(ssk);
if (!subflow || !subflow->conn)
continue;
/* skip if already in list */
msk = mptcp_sk(subflow->conn);
if (msk->dl_next || msk == head)
continue;
msk->dl_next = head;
head = msk;
}
spin_unlock_bh(&queue->rskq_lock);
if (!head)
return;
/* can't acquire the msk socket lock under the subflow one,
* or will cause ABBA deadlock
*/
release_sock(listener_ssk);
for (msk = head; msk; msk = next) {
struct sock *sk = (struct sock *)msk;
bool do_cancel_work;
sock_hold(sk);
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
next = msk->dl_next;
msk->first = NULL;
msk->dl_next = NULL;
do_cancel_work = __mptcp_close(sk, 0);
release_sock(sk);
if (do_cancel_work) {
/* lockdep will report a false positive ABBA deadlock
* between cancel_work_sync and the listener socket.
* The involved locks belong to different sockets WRT
* the existing AB chain.
* Using a per socket key is problematic as key
* deregistration requires process context and must be
* performed at socket disposal time, in atomic
* context.
* Just tell lockdep to consider the listener socket
* released here.
*/
mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
mptcp_cancel_work(sk);
mutex_acquire(&listener_sk->sk_lock.dep_map,
SINGLE_DEPTH_NESTING, 0, _RET_IP_);
}
sock_put(sk);
}
/* we are still under the listener msk socket lock */
lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
}
static int subflow_ulp_init(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1932,6 +1881,13 @@ static void subflow_ulp_release(struct sock *ssk)
* when the subflow is still unaccepted
*/
release = ctx->disposable || list_empty(&ctx->node);
/* inet_child_forget() does not call sk_state_change(),
* explicitly trigger the socket close machinery
*/
if (!release && !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW,
&mptcp_sk(sk)->flags))
mptcp_schedule_work(sk);
sock_put(sk);
}

tools/testing/selftests/net/mptcp/userspace_pm.sh

@@ -240,7 +240,7 @@ check_expected_one()
fi
stdbuf -o0 -e0 printf "\tExpected value for '%s': '%s', got '%s'.\n" \
"${var}" "${!var}" "${!exp}"
"${var}" "${!exp}" "${!var}"
return 1
}