mptcp: move __mptcp_error_report in protocol.c
This will simplify the next patch ("mptcp: process pending subflow error
on close").

No functional change intended.

Cc: stable@vger.kernel.org # v5.12+
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Mat Martineau <martineau@kernel.org>
Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit d5fbeff1ab
parent 6bec041147
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -770,6 +770,42 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
 	return moved;
 }
 
+void __mptcp_error_report(struct sock *sk)
+{
+	struct mptcp_subflow_context *subflow;
+	struct mptcp_sock *msk = mptcp_sk(sk);
+
+	mptcp_for_each_subflow(msk, subflow) {
+		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+		int err = sock_error(ssk);
+		int ssk_state;
+
+		if (!err)
+			continue;
+
+		/* only propagate errors on fallen-back sockets or
+		 * on MPC connect
+		 */
+		if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
+			continue;
+
+		/* We need to propagate only transition to CLOSE state.
+		 * Orphaned socket will see such state change via
+		 * subflow_sched_work_if_closed() and that path will properly
+		 * destroy the msk as needed.
+		 */
+		ssk_state = inet_sk_state_load(ssk);
+		if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
+			inet_sk_state_store(sk, ssk_state);
+		WRITE_ONCE(sk->sk_err, -err);
+
+		/* This barrier is coupled with smp_rmb() in mptcp_poll() */
+		smp_wmb();
+		sk_error_report(sk);
+		break;
+	}
+}
+
 /* In most cases we will be able to lock the mptcp socket. If its already
  * owned, we need to defer to the work queue to avoid ABBA deadlock.
  */
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1362,42 +1362,6 @@ void mptcp_space(const struct sock *ssk, int *space, int *full_space)
 	*full_space = mptcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
 }
 
-void __mptcp_error_report(struct sock *sk)
-{
-	struct mptcp_subflow_context *subflow;
-	struct mptcp_sock *msk = mptcp_sk(sk);
-
-	mptcp_for_each_subflow(msk, subflow) {
-		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-		int err = sock_error(ssk);
-		int ssk_state;
-
-		if (!err)
-			continue;
-
-		/* only propagate errors on fallen-back sockets or
-		 * on MPC connect
-		 */
-		if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
-			continue;
-
-		/* We need to propagate only transition to CLOSE state.
-		 * Orphaned socket will see such state change via
-		 * subflow_sched_work_if_closed() and that path will properly
-		 * destroy the msk as needed.
-		 */
-		ssk_state = inet_sk_state_load(ssk);
-		if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
-			inet_sk_state_store(sk, ssk_state);
-		WRITE_ONCE(sk->sk_err, -err);
-
-		/* This barrier is coupled with smp_rmb() in mptcp_poll() */
-		smp_wmb();
-		sk_error_report(sk);
-		break;
-	}
-}
-
 static void subflow_error_report(struct sock *ssk)
 {
 	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
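The function body is moved verbatim from subflow.c to protocol.c. Two details worth noting for readers of the moved code: sock_error(ssk) returns a negative errno, so WRITE_ONCE(sk->sk_err, -err) stores the positive error code that SO_ERROR and poll() consumers expect; and the smp_wmb() is documented as pairing with an smp_rmb() on the poll side. A minimal sketch of that reader side, paraphrasing the pairing named in the comment rather than quoting code from this patch:

	/* e.g. in mptcp_poll(): the smp_rmb() pairs with the smp_wmb() in
	 * __mptcp_error_report() so that sk_err is only observed after the
	 * state change that accompanies it (illustrative sketch, not part of
	 * this patch)
	 */
	smp_rmb();
	if (READ_ONCE(sk->sk_err))
		mask |= EPOLLERR;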