if (unlikely(mptcp_check_fallback(sk)))
return false;
+ /* prevent the addition of any MPTCP-related options on reset
+ * packets until we support MP_TCPRST/MP_FASTCLOSE
+ */
+ if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST))
+ return false;
+
if (mptcp_established_options_mp(sk, skb, &opt_size, remaining, opts))
ret = true;
else if (mptcp_established_options_dss(sk, skb, &opt_size, remaining,
return false;
}
-static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
+static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
struct mptcp_subflow_context *subflow,
struct sk_buff *skb,
struct mptcp_options_received *mp_opt)
TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq &&
subflow->mp_join && mp_opt->mp_join &&
READ_ONCE(msk->pm.server_side))
- tcp_send_ack(sk);
+ tcp_send_ack(ssk);
goto fully_established;
}
- /* we should process OoO packets before the first subflow is fully
- * established, but not expected for MP_JOIN subflows
+ /* we must process OoO packets before the first subflow is fully
+ * established. OoO packets are instead a protocol violation
+ * for MP_JOIN subflows, as the peer must not send any data
+ * before receiving the fourth ack - cf. RFC 8684, section 3.2.
*/
- if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1)
+ if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1) {
+ if (subflow->mp_join)
+ goto reset;
return subflow->mp_capable;
+ }
if (mp_opt->dss && mp_opt->use_ack) {
/* subflows are fully established as soon as we get any
 * additional ack.
 */
}
/* If the first established packet does not contain MP_CAPABLE + data
- * then fallback to TCP
+ * then fall back to TCP. Fallback scenarios require a reset for
+ * MP_JOIN subflows.
*/
if (!mp_opt->mp_capable) {
+ if (subflow->mp_join)
+ goto reset;
subflow->mp_capable = 0;
pr_fallback(msk);
__mptcp_do_fallback(msk);
subflow->pm_notified = 1;
if (subflow->mp_join) {
- clear_3rdack_retransmission(sk);
+ clear_3rdack_retransmission(ssk);
mptcp_pm_subflow_established(msk, subflow);
} else {
mptcp_pm_fully_established(msk);
}
return true;
+
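+ /* MP_JOIN subflows can't gracefully fall back to plain TCP: tear
+ * down the subflow and signal the peer with a reset
+ */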
+reset:
+ mptcp_subflow_reset(ssk);
+ return false;
}
static u64 expand_ack(u64 old_ack, u64 cur_ack, bool use_64bit)
return thmac == subflow->thmac;
}
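+/* tear down the subflow: move it to TCP_CLOSE, send an active reset
+ * towards the peer and perform the TCP-level disposal of the socket
+ */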
+void mptcp_subflow_reset(struct sock *ssk)
+{
+ tcp_set_state(ssk, TCP_CLOSE);
+ tcp_send_active_reset(ssk, GFP_ATOMIC);
+ tcp_done(ssk);
+}
+
static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
return;
do_reset:
- tcp_send_active_reset(sk, GFP_ATOMIC);
- tcp_done(sk);
+ mptcp_subflow_reset(sk);
}
struct request_sock_ops mptcp_subflow_request_sock_ops;