mptcp: fix shutdown vs fallback race
author     Paolo Abeni <pabeni@redhat.com>
           Tue, 28 Jun 2022 01:02:38 +0000 (18:02 -0700)
committer  Jakub Kicinski <kuba@kernel.org>
           Wed, 29 Jun 2022 03:45:42 +0000 (20:45 -0700)
If the MPTCP socket shutdown happens before a fallback
to TCP, and all the pending data has already been spooled,
we never close the TCP connection.

Address the issue by explicitly checking for the critical
condition at fallback time.
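
As a rough sketch of the race (simplified, not the literal call
chain; snd_data_fin_enable and MPTCP_FALLBACK_DONE are the fields
touched by the patch below):

    /* shutdown(SHUT_WR) on the MPTCP socket: a DATA_FIN is owed */
    WRITE_ONCE(msk->snd_data_fin_enable, 1);

    /* ... all pending data gets spooled to the subflow ... */

    /* fallback: the old helper only marks the msk as fallen back */
    __mptcp_do_fallback(msk);    /* sets MPTCP_FALLBACK_DONE */

    /* nobody ever sends the TCP FIN on the subflow from here on */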

Fixes: 1e39e5a32ad7 ("mptcp: infinite mapping sending")
Fixes: 0348c690ed37 ("mptcp: add the fallback check")
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/mptcp/options.c
net/mptcp/protocol.c
net/mptcp/protocol.h
net/mptcp/subflow.c

diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 2a6351d55a7d9d0bcb3163d410f564b51f94698b..aead331866a0ce8612b418f9c0e25f3a6946ef77 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -967,7 +967,7 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
                        goto reset;
                subflow->mp_capable = 0;
                pr_fallback(msk);
-               __mptcp_do_fallback(msk);
+               mptcp_do_fallback(ssk);
                return false;
        }
 
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index e6fcb61443dd6cf4b632e63882f944d3ab606951..e63bc2bb7fff030cb734fa5df2d88c3598c6f056 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1245,7 +1245,7 @@ static void mptcp_update_infinite_map(struct mptcp_sock *msk,
        MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPTX);
        mptcp_subflow_ctx(ssk)->send_infinite_map = 0;
        pr_fallback(msk);
-       __mptcp_do_fallback(msk);
+       mptcp_do_fallback(ssk);
 }
 
 static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 1d2d71711872501653a75eded20118819b57f2c0..9860179bfd5edaa7f7d16887bd8ccd8f6d07de85 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -927,12 +927,25 @@ static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
        set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
 }
 
-static inline void mptcp_do_fallback(struct sock *sk)
+static inline void mptcp_do_fallback(struct sock *ssk)
 {
-       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
-       struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+       struct sock *sk = subflow->conn;
+       struct mptcp_sock *msk;
 
+       msk = mptcp_sk(sk);
        __mptcp_do_fallback(msk);
+       if (READ_ONCE(msk->snd_data_fin_enable) && !(ssk->sk_shutdown & SEND_SHUTDOWN)) {
+               gfp_t saved_allocation = ssk->sk_allocation;
+
+               /* we are in an atomic (BH) scope, override ssk default for data
+                * fin allocation
+                */
+               ssk->sk_allocation = GFP_ATOMIC;
+               ssk->sk_shutdown |= SEND_SHUTDOWN;
+               tcp_shutdown(ssk, SEND_SHUTDOWN);
+               ssk->sk_allocation = saved_allocation;
+       }
 }
 
 #define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)", __func__, a)
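
The GFP_ATOMIC override above is needed because tcp_shutdown() may
have to allocate the FIN skb via sk->sk_allocation, while
mptcp_do_fallback() can now be reached from BH (softirq) context,
where sleeping allocations are not allowed. In isolation, the
save/override/restore pattern looks like this (hypothetical helper
name, shown only to illustrate the sequence):

    /* hypothetical helper: shut down a subflow from atomic context */
    static void mptcp_shutdown_ssk_atomic(struct sock *ssk)
    {
            gfp_t saved_allocation = ssk->sk_allocation;

            ssk->sk_allocation = GFP_ATOMIC; /* no sleeping in BH */
            ssk->sk_shutdown |= SEND_SHUTDOWN;
            tcp_shutdown(ssk, SEND_SHUTDOWN);
            ssk->sk_allocation = saved_allocation;
    }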
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 8dfea6a8a82f643fc7fd01984f9172a99d45fce1..b34b96fb742f92fe6932768845ae5a52f34df310 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1279,7 +1279,7 @@ fallback:
                        return false;
                }
 
-               __mptcp_do_fallback(msk);
+               mptcp_do_fallback(ssk);
        }
 
        skb = skb_peek(&ssk->sk_receive_queue);
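
With this change all three fallback call sites funnel through
mptcp_do_fallback(ssk): option processing (net/mptcp/options.c),
the transmit path (net/mptcp/protocol.c) and the receive path
(net/mptcp/subflow.c), so the pending-shutdown check runs no
matter which path triggers the fallback.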