From d51991e2e31477853e5b9c1005ac617707077286 Mon Sep 17 00:00:00 2001
From: Paolo Abeni
Date: Mon, 27 Jun 2022 18:02:38 -0700
Subject: mptcp: fix shutdown vs fallback race

If the MPTCP socket shutdown happens before a fallback to TCP, and
all the pending data has already been spooled, we never close the
TCP connection.

Address the issue by explicitly checking for this critical condition
at fallback time.

Fixes: 1e39e5a32ad7 ("mptcp: infinite mapping sending")
Fixes: 0348c690ed37 ("mptcp: add the fallback check")
Signed-off-by: Paolo Abeni
Signed-off-by: Mat Martineau
Signed-off-by: Jakub Kicinski
---
 net/mptcp/options.c  |  2 +-
 net/mptcp/protocol.c |  2 +-
 net/mptcp/protocol.h | 19 ++++++++++++++++---
 net/mptcp/subflow.c  |  2 +-
 4 files changed, 19 insertions(+), 6 deletions(-)

diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 2a6351d55a7d..aead331866a0 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -967,7 +967,7 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
 			goto reset;
 		subflow->mp_capable = 0;
 		pr_fallback(msk);
-		__mptcp_do_fallback(msk);
+		mptcp_do_fallback(ssk);
 		return false;
 	}
 
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index e6fcb61443dd..e63bc2bb7fff 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1245,7 +1245,7 @@ static void mptcp_update_infinite_map(struct mptcp_sock *msk,
 	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPTX);
 	mptcp_subflow_ctx(ssk)->send_infinite_map = 0;
 	pr_fallback(msk);
-	__mptcp_do_fallback(msk);
+	mptcp_do_fallback(ssk);
 }
 
 static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 1d2d71711872..9860179bfd5e 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -927,12 +927,25 @@ static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
 	set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
 }
 
-static inline void mptcp_do_fallback(struct sock *sk)
+static inline void mptcp_do_fallback(struct sock *ssk)
 {
-	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
-	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+	struct sock *sk = subflow->conn;
+	struct mptcp_sock *msk;
 
+	msk = mptcp_sk(sk);
 	__mptcp_do_fallback(msk);
+	if (READ_ONCE(msk->snd_data_fin_enable) && !(ssk->sk_shutdown & SEND_SHUTDOWN)) {
+		gfp_t saved_allocation = ssk->sk_allocation;
+
+		/* we are in a atomic (BH) scope, override ssk default for data
+		 * fin allocation
+		 */
+		ssk->sk_allocation = GFP_ATOMIC;
+		ssk->sk_shutdown |= SEND_SHUTDOWN;
+		tcp_shutdown(ssk, SEND_SHUTDOWN);
+		ssk->sk_allocation = saved_allocation;
+	}
 }
 
 #define pr_fallback(a)	pr_debug("%s:fallback to TCP (msk=%p)", __func__, a)
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 8dfea6a8a82f..b34b96fb742f 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1279,7 +1279,7 @@ fallback:
 			return false;
 		}
 
-		__mptcp_do_fallback(msk);
+		mptcp_do_fallback(ssk);
 	}
 
 	skb = skb_peek(&ssk->sk_receive_queue);