mptcp: Check for orphaned subflow before handling MP_FAIL timer

MP_FAIL timeout (waiting for a peer to respond to an MP_FAIL with
another MP_FAIL) is implemented using the MPTCP socket's sk_timer. That
timer is also used at MPTCP socket close, so it's important to not have
the two timer users interfere with each other.

At MPTCP socket close, all subflows are orphaned before sk_timer is
manipulated. By checking the SOCK_DEAD flag on the subflows, each
subflow can determine if the timer is safe to alter without acquiring
any MPTCP-level lock. This replaces code that was using the
mptcp_data_lock and MPTCP-level socket state checks that did not
correctly protect the timer.

Fixes: 49fa1919d6 ("mptcp: reset subflow when MP_FAIL doesn't respond")
Reviewed-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
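
To make the ordering argument above concrete, here is a small standalone userspace model (illustration only, not kernel code; all names such as struct msk_model, msk_close_model() and the *_timer_armed flags are invented for this sketch). It models the rule the patch relies on: the close path marks every subflow dead before it touches the shared timer, so a subflow that still observes itself as not dead may stop or re-arm the timer for MP_FAIL handling without taking an msk-level lock.

/* Userspace model of the SOCK_DEAD ordering rule (illustration only). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct subflow_model {
	atomic_bool dead;		/* stands in for SOCK_DEAD on the subflow */
};

struct msk_model {
	struct subflow_model *subflows[2];
	int nr_subflows;
	atomic_bool fail_timer_armed;	/* timer used for the MP_FAIL timeout */
	atomic_bool close_timer_armed;	/* same timer, reused by the close path */
};

/* Close path: orphan (mark dead) every subflow *before* touching the timer. */
static void msk_close_model(struct msk_model *msk)
{
	for (int i = 0; i < msk->nr_subflows; i++)
		atomic_store(&msk->subflows[i]->dead, true);

	/* Only after all subflows are dead is the shared timer repurposed. */
	atomic_store(&msk->fail_timer_armed, false);
	atomic_store(&msk->close_timer_armed, true);
}

/*
 * MP_FAIL path on one subflow: seeing the subflow still alive means the
 * close path has not started using the timer, so stopping it here needs
 * no msk-level lock.
 */
static void subflow_mp_fail_response_model(struct msk_model *msk,
					   struct subflow_model *ssk)
{
	if (!atomic_load(&ssk->dead))
		atomic_store(&msk->fail_timer_armed, false);	/* ~sk_stop_timer() */
}

int main(void)
{
	struct subflow_model s0 = { .dead = false }, s1 = { .dead = false };
	struct msk_model m = {
		.subflows = { &s0, &s1 },
		.nr_subflows = 2,
		.fail_timer_armed = true,
	};

	subflow_mp_fail_response_model(&m, &s0);	/* runs: subflow not orphaned */
	msk_close_model(&m);
	subflow_mp_fail_response_model(&m, &s1);	/* skipped: close owns the timer */

	printf("fail timer armed: %d, close timer armed: %d\n",
	       (int)atomic_load(&m.fail_timer_armed),
	       (int)atomic_load(&m.close_timer_armed));
	return 0;
}
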
@@ -312,13 +312,10 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
 		subflow->send_mp_fail = 1;
 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILTX);
 		subflow->send_infinite_map = 1;
-	} else if (s && inet_sk_state_load(s) != TCP_CLOSE) {
+	} else if (!sock_flag(sk, SOCK_DEAD)) {
 		pr_debug("MP_FAIL response received");
-		mptcp_data_lock(s);
-		if (inet_sk_state_load(s) != TCP_CLOSE)
-			sk_stop_timer(s, &s->sk_timer);
-		mptcp_data_unlock(s);
+		sk_stop_timer(s, &s->sk_timer);
 	}
 }

@@ -1016,12 +1016,9 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
 		pr_debug("infinite mapping received");
 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
 		subflow->map_data_len = 0;
-		if (sk && inet_sk_state_load(sk) != TCP_CLOSE) {
-			mptcp_data_lock(sk);
-			if (inet_sk_state_load(sk) != TCP_CLOSE)
-				sk_stop_timer(sk, &sk->sk_timer);
-			mptcp_data_unlock(sk);
-		}
+		if (!sock_flag(ssk, SOCK_DEAD))
+			sk_stop_timer(sk, &sk->sk_timer);
 		return MAPPING_INVALID;
 	}
@@ -1241,9 +1238,8 @@ fallback:
 			tcp_send_active_reset(ssk, GFP_ATOMIC);
 			while ((skb = skb_peek(&ssk->sk_receive_queue)))
 				sk_eat_skb(ssk, skb);
-		} else {
+		} else if (!sock_flag(ssk, SOCK_DEAD)) {
 			WRITE_ONCE(subflow->mp_fail_response_expect, true);
-			/* The data lock is acquired in __mptcp_move_skbs() */
 			sk_reset_timer((struct sock *)msk,
 				       &((struct sock *)msk)->sk_timer,
 				       jiffies + TCP_RTO_MAX);