// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/algapi.h>
#include <crypto/sha2.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"

#include <trace/events/mptcp.h>
static void mptcp_subflow_ops_undo_override(struct sock *ssk);

static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
				  enum linux_mptcp_mib_field field)
{
	MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
}

static void subflow_req_destructor(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	pr_debug("subflow_req=%p", subflow_req);

	if (subflow_req->msk)
		sock_put((struct sock *)subflow_req->msk);

	mptcp_token_destroy_request(req);
}

static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
				  void *hmac)
{
	u8 msg[8];

	put_unaligned_be32(nonce1, &msg[0]);
	put_unaligned_be32(nonce2, &msg[4]);

	mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}

static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
{
	return mptcp_is_fully_established((void *)msk) &&
	       ((mptcp_pm_is_userspace(msk) &&
		 mptcp_userspace_pm_active(msk)) ||
		READ_ONCE(msk->pm.accept_subflow));
}
/* validate received token and create truncated hmac and nonce for SYN-ACK */
static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_req)
{
	struct mptcp_sock *msk = subflow_req->msk;
	u8 hmac[SHA256_DIGEST_SIZE];

	get_random_bytes(&subflow_req->local_nonce, sizeof(u32));

	subflow_generate_hmac(msk->local_key, msk->remote_key,
			      subflow_req->local_nonce,
			      subflow_req->remote_nonce, hmac);

	subflow_req->thmac = get_unaligned_be64(hmac);
}
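
/* Illustrative sketch of the handshake math above, cfr. RFC 8684 section
 * 3.2: the SYN-ACK carries only the first 64 bits (big-endian) of the
 * HMAC computed over the two exchanged nonces. This is a hypothetical
 * stand-alone example reusing the helpers defined in this file; it is not
 * used by the implementation.
 */
static __maybe_unused u64 subflow_thmac_sketch(u64 key1, u64 key2,
					       u32 nonce1, u32 nonce2)
{
	u8 hmac[SHA256_DIGEST_SIZE];

	/* same message layout as subflow_generate_hmac(): the two nonces */
	subflow_generate_hmac(key1, key2, nonce1, nonce2, hmac);

	/* truncate to the 64 most significant bits, as done for thmac */
	return get_unaligned_be64(hmac);
}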
static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_sock *msk;
	int local_id;

	msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
	if (!msk) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
		return NULL;
	}

	local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
	if (local_id < 0) {
		sock_put((struct sock *)msk);
		return NULL;
	}
	subflow_req->local_id = local_id;

	return msk;
}
static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	subflow_req->mp_capable = 0;
	subflow_req->mp_join = 0;
	subflow_req->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk_listener));
	subflow_req->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk_listener));
	subflow_req->msk = NULL;
	mptcp_token_init_request(req);
}

static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
{
	return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
}

static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason)
{
	struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP);

	if (mpext) {
		memset(mpext, 0, sizeof(*mpext));
		mpext->reset_reason = reason;
	}
}
/* Init mptcp request socket.
 *
 * Returns an error code if a JOIN has failed and a TCP reset
 * should be sent.
 */
static int subflow_check_req(struct request_sock *req,
			     const struct sock *sk_listener,
			     struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	bool opt_mp_capable, opt_mp_join;

	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
		return -EINVAL;
#endif

	mptcp_get_options(skb, &mp_opt);

	opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
	opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
	if (opt_mp_capable) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

		if (opt_mp_join)
			return 0;
	} else if (opt_mp_join) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
	}

	if (opt_mp_capable && listener->request_mptcp) {
		int err, retries = MPTCP_TOKEN_MAX_RETRIES;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
again:
		do {
			get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key));
		} while (subflow_req->local_key == 0);

		if (unlikely(req->syncookie)) {
			mptcp_crypto_key_sha(subflow_req->local_key,
					     &subflow_req->token,
					     &subflow_req->idsn);
			if (mptcp_token_exists(subflow_req->token)) {
				if (retries-- > 0)
					goto again;
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);
			} else {
				subflow_req->mp_capable = 1;
			}
			return 0;
		}

		err = mptcp_token_new_request(req);
		if (err == 0)
			subflow_req->mp_capable = 1;
		else if (retries-- > 0)
			goto again;
		else
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);

	} else if (opt_mp_join && listener->request_mptcp) {
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
		subflow_req->mp_join = 1;
		subflow_req->backup = mp_opt.backup;
		subflow_req->remote_id = mp_opt.join_id;
		subflow_req->token = mp_opt.token;
		subflow_req->remote_nonce = mp_opt.nonce;
		subflow_req->msk = subflow_token_join_request(req);

		/* Can't fall back to TCP in this case. */
		if (!subflow_req->msk) {
			subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
			return -EPERM;
		}

		if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
			pr_debug("syn inet_sport=%d %d",
				 ntohs(inet_sk(sk_listener)->inet_sport),
				 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
			if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
				return -EPERM;
			}
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX);
		}

		subflow_req_create_thmac(subflow_req);

		if (unlikely(req->syncookie)) {
			if (mptcp_can_accept_new_subflow(subflow_req->msk))
				subflow_init_req_cookie_join_save(subflow_req, skb);
			else
				return -EPERM;
		}

		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
			 subflow_req->remote_nonce, subflow_req->msk);
	}

	return 0;
}
int mptcp_subflow_init_cookie_req(struct request_sock *req,
				  const struct sock *sk_listener,
				  struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	bool opt_mp_capable, opt_mp_join;
	int err;

	subflow_init_req(req, sk_listener);
	mptcp_get_options(skb, &mp_opt);

	opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
	opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
	if (opt_mp_capable && opt_mp_join)
		return -EINVAL;

	if (opt_mp_capable && listener->request_mptcp) {
		if (mp_opt.sndr_key == 0)
			return -EINVAL;

		subflow_req->local_key = mp_opt.rcvr_key;
		err = mptcp_token_new_request(req);
		if (err)
			return err;

		subflow_req->mp_capable = 1;
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	} else if (opt_mp_join && listener->request_mptcp) {
		if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
			return -EINVAL;

		subflow_req->mp_join = 1;
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);
static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
					      struct sk_buff *skb,
					      struct flowi *fl,
					      struct request_sock *req)
{
	struct dst_entry *dst;
	int err;

	tcp_rsk(req)->is_mptcp = 1;
	subflow_init_req(req, sk);

	dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req);
	if (!dst)
		return NULL;

	err = subflow_check_req(req, sk, skb);
	if (err == 0)
		return dst;

	dst_release(dst);
	if (!req->syncookie)
		tcp_request_sock_ops.send_reset(sk, skb);
	return NULL;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
					      struct sk_buff *skb,
					      struct flowi *fl,
					      struct request_sock *req)
{
	struct dst_entry *dst;
	int err;

	tcp_rsk(req)->is_mptcp = 1;
	subflow_init_req(req, sk);

	dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req);
	if (!dst)
		return NULL;

	err = subflow_check_req(req, sk, skb);
	if (err == 0)
		return dst;

	dst_release(dst);
	if (!req->syncookie)
		tcp6_request_sock_ops.send_reset(sk, skb);
	return NULL;
}
#endif
/* validate received truncated hmac and create hmac for third ACK */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{
	u8 hmac[SHA256_DIGEST_SIZE];
	u64 thmac;

	subflow_generate_hmac(subflow->remote_key, subflow->local_key,
			      subflow->remote_nonce, subflow->local_nonce,
			      hmac);

	thmac = get_unaligned_be64(hmac);
	pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
		 subflow, subflow->token, thmac, subflow->thmac);

	return thmac == subflow->thmac;
}

void mptcp_subflow_reset(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;

	/* mptcp_mp_fail_no_response() can reach here on an already closed
	 * socket
	 */
	if (ssk->sk_state == TCP_CLOSE)
		return;

	/* must hold: tcp_done() could drop last reference on parent */
	sock_hold(sk);

	tcp_send_active_reset(ssk, GFP_ATOMIC);
	tcp_done(ssk);
	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
	    schedule_work(&mptcp_sk(sk)->work))
		return; /* worker will put sk for us */

	sock_put(sk);
}
static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
{
	return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
}

void __mptcp_set_connected(struct sock *sk)
{
	if (sk->sk_state == TCP_SYN_SENT) {
		inet_sk_state_store(sk, TCP_ESTABLISHED);
		sk->sk_state_change(sk);
	}
}

static void mptcp_set_connected(struct sock *sk)
{
	mptcp_data_lock(sk);
	if (!sock_owned_by_user(sk))
		__mptcp_set_connected(sk);
	else
		__set_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->cb_flags);
	mptcp_data_unlock(sk);
}
static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_options_received mp_opt;
	struct sock *parent = subflow->conn;

	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

	/* be sure no special action on any packet other than syn-ack */
	if (subflow->conn_finished)
		return;

	mptcp_propagate_sndbuf(parent, sk);
	subflow->rel_write_seq = 1;
	subflow->conn_finished = 1;
	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
	pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);

	mptcp_get_options(skb, &mp_opt);
	if (subflow->request_mptcp) {
		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) {
			MPTCP_INC_STATS(sock_net(sk),
					MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
			mptcp_do_fallback(sk);
			pr_fallback(mptcp_sk(subflow->conn));
			goto fallback;
		}

		if (mp_opt.suboptions & OPTION_MPTCP_CSUMREQD)
			WRITE_ONCE(mptcp_sk(parent)->csum_enabled, true);
		if (mp_opt.deny_join_id0)
			WRITE_ONCE(mptcp_sk(parent)->pm.remote_deny_join_id0, true);
		subflow->mp_capable = 1;
		subflow->can_ack = 1;
		subflow->remote_key = mp_opt.sndr_key;
		pr_debug("subflow=%p, remote_key=%llu", subflow,
			 subflow->remote_key);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
		mptcp_finish_connect(sk);
		mptcp_set_connected(parent);
	} else if (subflow->request_join) {
		u8 hmac[SHA256_DIGEST_SIZE];

		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ)) {
			subflow->reset_reason = MPTCP_RST_EMPTCP;
			goto do_reset;
		}

		subflow->backup = mp_opt.backup;
		subflow->thmac = mp_opt.thmac;
		subflow->remote_nonce = mp_opt.nonce;
		subflow->remote_id = mp_opt.join_id;
		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
			 subflow, subflow->thmac, subflow->remote_nonce,
			 subflow->backup);

		if (!subflow_thmac_valid(subflow)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
			subflow->reset_reason = MPTCP_RST_EMPTCP;
			goto do_reset;
		}

		if (!mptcp_finish_join(sk))
			goto do_reset;

		subflow_generate_hmac(subflow->local_key, subflow->remote_key,
				      subflow->local_nonce,
				      subflow->remote_nonce,
				      hmac);
		memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);

		subflow->mp_join = 1;
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);

		if (subflow_use_different_dport(mptcp_sk(parent), sk)) {
			pr_debug("synack inet_dport=%d %d",
				 ntohs(inet_sk(sk)->inet_dport),
				 ntohs(inet_sk(parent)->inet_dport));
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
		}
	} else if (mptcp_check_fallback(sk)) {
fallback:
		mptcp_rcv_space_init(mptcp_sk(parent), sk);
		mptcp_set_connected(parent);
	}
	return;

do_reset:
	subflow->reset_transient = 0;
	mptcp_subflow_reset(sk);
}
static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id)
{
	subflow->local_id = local_id;
	subflow->local_id_valid = 1;
}

static int subflow_chk_local_id(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	int err;

	if (likely(subflow->local_id_valid))
		return 0;

	err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
	if (err < 0)
		return err;

	subflow_set_local_id(subflow, err);
	return 0;
}

static int subflow_rebuild_header(struct sock *sk)
{
	int err = subflow_chk_local_id(sk);

	if (unlikely(err < 0))
		return err;

	return inet_sk_rebuild_header(sk);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static int subflow_v6_rebuild_header(struct sock *sk)
{
	int err = subflow_chk_local_id(sk);

	if (unlikely(err < 0))
		return err;

	return inet6_sk_rebuild_header(sk);
}
#endif
static struct request_sock_ops mptcp_subflow_v4_request_sock_ops __ro_after_init;
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&mptcp_subflow_v4_request_sock_ops,
				&subflow_request_sock_ipv4_ops,
				sk, skb);
drop:
	tcp_listendrop(sk);
	return 0;
}

static void subflow_v4_req_destructor(struct request_sock *req)
{
	subflow_req_destructor(req);
	tcp_request_sock_ops.destructor(req);
}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init;
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
static struct proto tcpv6_prot_override __ro_after_init;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	if (skb->protocol == htons(ETH_P_IP))
		return subflow_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
		return 0;
	}

	return tcp_conn_request(&mptcp_subflow_v6_request_sock_ops,
				&subflow_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}

static void subflow_v6_req_destructor(struct request_sock *req)
{
	subflow_req_destructor(req);
	tcp6_request_sock_ops.destructor(req);
}
#endif
struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
					       struct sock *sk_listener,
					       bool attach_listener)
{
	if (ops->family == AF_INET)
		ops = &mptcp_subflow_v4_request_sock_ops;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (ops->family == AF_INET6)
		ops = &mptcp_subflow_v6_request_sock_ops;
#endif

	return inet_reqsk_alloc(ops, sk_listener, attach_listener);
}
EXPORT_SYMBOL(mptcp_subflow_reqsk_alloc);
/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
			       const struct mptcp_options_received *mp_opt)
{
	const struct mptcp_subflow_request_sock *subflow_req;
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;

	subflow_req = mptcp_subflow_rsk(req);
	msk = subflow_req->msk;
	if (!msk)
		return false;

	subflow_generate_hmac(msk->remote_key, msk->local_key,
			      subflow_req->remote_nonce,
			      subflow_req->local_nonce, hmac);

	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}
static void mptcp_force_close(struct sock *sk)
{
	/* the msk is not yet exposed to user-space, and refcount is 2 */
	inet_sk_state_store(sk, TCP_CLOSE);
	sk_common_release(sk);
	sock_put(sk);
}

static void subflow_ulp_fallback(struct sock *sk,
				 struct mptcp_subflow_context *old_ctx)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	mptcp_subflow_tcp_fallback(sk, old_ctx);
	icsk->icsk_ulp_ops = NULL;
	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	tcp_sk(sk)->is_mptcp = 0;

	mptcp_subflow_ops_undo_override(sk);
}
void mptcp_subflow_drop_ctx(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

	if (!ctx)
		return;

	subflow_ulp_fallback(ssk, ctx);
	if (ctx->conn)
		sock_put(ctx->conn);

	kfree_rcu(ctx, rcu);
}

void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
				     struct mptcp_options_received *mp_opt)
{
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	subflow->remote_key = mp_opt->sndr_key;
	subflow->fully_established = 1;
	subflow->can_ack = 1;
	WRITE_ONCE(msk->fully_established, true);
}
static struct sock *subflow_syn_recv_sock(const struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst,
					  struct request_sock *req_unhash,
					  bool *own_req)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
	struct mptcp_subflow_request_sock *subflow_req;
	struct mptcp_options_received mp_opt;
	bool fallback, fallback_is_fatal;
	struct sock *new_msk = NULL;
	struct mptcp_sock *owner;
	struct sock *child;

	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

	/* After child creation we must look for MPC even when options
	 * are not parsed
	 */
	mp_opt.suboptions = 0;

	/* hopefully temporary handling for MP_JOIN+syncookie */
	subflow_req = mptcp_subflow_rsk(req);
	fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
	fallback = !tcp_rsk(req)->is_mptcp;
	if (fallback)
		goto create_child;

	/* if the sk is MP_CAPABLE, we try to fetch the client key */
	if (subflow_req->mp_capable) {
		/* we can receive and accept an in-window, out-of-order pkt,
		 * which may not carry the MP_CAPABLE opt even on mptcp enabled
		 * paths: always try to extract the peer key, and fallback
		 * for packets missing it.
		 * Even OoO DSS packets legitimately arriving after dropped or
		 * reordered MPC will cause fallback, but we don't have other
		 * options.
		 */
		mptcp_get_options(skb, &mp_opt);
		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) {
			fallback = true;
			goto create_child;
		}

		new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
		if (!new_msk)
			fallback = true;
	} else if (subflow_req->mp_join) {
		mptcp_get_options(skb, &mp_opt);
		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ) ||
		    !subflow_hmac_valid(req, &mp_opt) ||
		    !mptcp_can_accept_new_subflow(subflow_req->msk)) {
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
			fallback = true;
		}
	}

create_child:
	child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
						     req_unhash, own_req);

	if (child && *own_req) {
		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

		tcp_rsk(req)->drop_req = false;

		/* we need to fall back on ctx allocation failure and on pre-reqs
		 * checking above. In the latter scenario we additionally need
		 * to reset the context to non MPTCP status.
		 */
		if (!ctx || fallback) {
			if (fallback_is_fatal) {
				subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
				goto dispose_child;
			}

			if (new_msk)
				mptcp_copy_inaddrs(new_msk, child);
			mptcp_subflow_drop_ctx(child);
			goto out;
		}

		/* ssk inherits options of listener sk */
		ctx->setsockopt_seq = listener->setsockopt_seq;

		if (ctx->mp_capable) {
			owner = mptcp_sk(new_msk);

			/* this can't race with mptcp_close(), as the msk is
			 * not yet exposed to user-space
			 */
			inet_sk_state_store((void *)new_msk, TCP_ESTABLISHED);

			/* record the newly created socket as the first msk
			 * subflow, but don't link it yet into conn_list
			 */
			WRITE_ONCE(owner->first, child);

			/* new mpc subflow takes ownership of the newly
			 * created mptcp socket
			 */
			mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq;
			mptcp_pm_new_connection(owner, child, 1);
			mptcp_token_accept(subflow_req, owner);
			ctx->conn = new_msk;
			new_msk = NULL;

			/* set msk addresses early to ensure mptcp_pm_get_local_id()
			 * uses the correct data
			 */
			mptcp_copy_inaddrs(ctx->conn, child);
			mptcp_propagate_sndbuf(ctx->conn, child);

			mptcp_rcv_space_init(owner, child);
			list_add(&ctx->node, &owner->conn_list);
			sock_hold(ctx->conn);

			/* with OoO packets we can reach here without ingress
			 * mpc option
			 */
			if (mp_opt.suboptions & OPTIONS_MPTCP_MPC) {
				mptcp_subflow_fully_established(ctx, &mp_opt);
				mptcp_pm_fully_established(owner, child, GFP_ATOMIC);
				ctx->pm_notified = 1;
			}
		} else if (ctx->mp_join) {
			owner = subflow_req->msk;
			if (!owner) {
				subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
				goto dispose_child;
			}

			/* move the msk reference ownership to the subflow */
			subflow_req->msk = NULL;
			ctx->conn = (struct sock *)owner;

			if (subflow_use_different_sport(owner, sk)) {
				pr_debug("ack inet_sport=%d %d",
					 ntohs(inet_sk(sk)->inet_sport),
					 ntohs(inet_sk((struct sock *)owner)->inet_sport));
				if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
					SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX);
					goto dispose_child;
				}
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX);
			}

			if (!mptcp_finish_join(child))
				goto dispose_child;

			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
			tcp_rsk(req)->drop_req = true;
		}
	}

out:
	/* dispose of the left over mptcp master, if any */
	if (unlikely(new_msk))
		mptcp_force_close(new_msk);

	/* check for expected invariant - should never trigger, just help
	 * catching earlier subtle bugs
	 */
	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
		     (!mptcp_subflow_ctx(child) ||
		      !mptcp_subflow_ctx(child)->conn));
	return child;

dispose_child:
	mptcp_subflow_drop_ctx(child);
	tcp_rsk(req)->drop_req = true;
	inet_csk_prepare_for_destroy_sock(child);
	tcp_done(child);
	req->rsk_ops->send_reset(sk, skb);

	/* The last child reference will be released by the caller */
	return child;
}
static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;
static struct proto tcp_prot_override __ro_after_init;

enum mapping_status {
	MAPPING_OK,
	MAPPING_INVALID,
	MAPPING_EMPTY,
	MAPPING_DATA_FIN,
	MAPPING_DUMMY,
	MAPPING_BAD_CSUM
};

static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
	pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
		 ssn, subflow->map_subflow_seq, subflow->map_data_len);
}
static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned int skb_consumed;

	skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
	if (WARN_ON_ONCE(skb_consumed >= skb->len))
		return true;

	return skb->len - skb_consumed <= subflow->map_data_len -
					  mptcp_subflow_get_map_offset(subflow);
}

static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

	if (unlikely(before(ssn, subflow->map_subflow_seq))) {
		/* Mapping covers data later in the subflow stream,
		 * currently unsupported.
		 */
		dbg_bad_map(subflow, ssn);
		return false;
	}
	if (unlikely(!before(ssn, subflow->map_subflow_seq +
				  subflow->map_data_len))) {
		/* Mapping covers past subflow data, invalid */
		dbg_bad_map(subflow, ssn);
		return false;
	}
	return true;
}
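
/* Illustrative sketch of the check performed by validate_mapping() above:
 * a subflow-relative sequence number falls inside a DSS mapping iff it is
 * not before the mapping start and is before its end; before() from
 * net/tcp.h keeps both comparisons correct across 32-bit wraparound.
 * Hypothetical helper, not used by the implementation.
 */
static __maybe_unused bool ssn_in_mapping_sketch(u32 ssn, u32 map_subflow_seq,
						 u32 map_data_len)
{
	return !before(ssn, map_subflow_seq) &&
	       before(ssn, map_subflow_seq + map_data_len);
}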
static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
					      bool csum_reqd)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 offset, seq, delta;
	__sum16 csum;
	int len;

	if (!csum_reqd)
		return MAPPING_OK;

	/* mapping already validated on previous traversal */
	if (subflow->map_csum_len == subflow->map_data_len)
		return MAPPING_OK;

	/* traverse the receive queue, ensuring it contains a full
	 * DSS mapping and accumulating the related csum.
	 * Preserve the accumulated csum across multiple calls, to compute
	 * the csum only once
	 */
	delta = subflow->map_data_len - subflow->map_csum_len;
	for (;;) {
		seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len;
		offset = seq - TCP_SKB_CB(skb)->seq;

		/* if the current skb has not been accounted yet, csum its contents
		 * up to the amount covered by the current DSS
		 */
		if (offset < skb->len) {
			__wsum csum;

			len = min(skb->len - offset, delta);
			csum = skb_checksum(skb, offset, len, 0);
			subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum,
								subflow->map_csum_len);

			delta -= len;
			subflow->map_csum_len += len;
		}
		if (delta == 0)
			break;

		if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) {
			/* if this subflow is closed, the partial mapping
			 * will never be completed; flush the pending skbs, so
			 * that subflow_sched_work_if_closed() can kick in
			 */
			if (unlikely(ssk->sk_state == TCP_CLOSE))
				while ((skb = skb_peek(&ssk->sk_receive_queue)))
					sk_eat_skb(ssk, skb);

			/* not enough data to validate the csum */
			return MAPPING_EMPTY;
		}

		/* the DSS mapping for next skbs will be validated later,
		 * when a get_mapping_status call will process such skb
		 */
		skb = skb->next;
	}

	/* note that 'map_data_len' accounts only for the carried data, does
	 * not include the eventual seq increment due to the data fin,
	 * while the pseudo header requires the original DSS data len,
	 * including that
	 */
	csum = __mptcp_make_csum(subflow->map_seq,
				 subflow->map_subflow_seq,
				 subflow->map_data_len + subflow->map_data_fin,
				 subflow->map_data_csum);
	if (unlikely(csum)) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
		return MAPPING_BAD_CSUM;
	}

	subflow->valid_csum_seen = 1;
	return MAPPING_OK;
}
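
/* Illustrative sketch of the incremental checksum logic used above: folding
 * the accumulation of two chunks equals folding a single pass over the whole
 * buffer, which is what lets validate_data_csum() preserve map_data_csum
 * across multiple skbs. Hypothetical helper, not used by the implementation.
 */
static __maybe_unused __sum16 csum_chunked_sketch(const void *data,
						  int len1, int len2)
{
	__wsum csum = csum_partial(data, len1, 0);

	/* the second chunk's byte offset matters when len1 is odd */
	csum = csum_block_add(csum, csum_partial(data + len1, len2, 0), len1);
	return csum_fold(csum);
}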
static enum mapping_status get_mapping_status(struct sock *ssk,
					      struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	bool csum_reqd = READ_ONCE(msk->csum_enabled);
	struct mptcp_ext *mpext;
	struct sk_buff *skb;
	u16 data_len;
	u64 map_seq;

	skb = skb_peek(&ssk->sk_receive_queue);
	if (!skb)
		return MAPPING_EMPTY;

	if (mptcp_check_fallback(ssk))
		return MAPPING_DUMMY;

	mpext = mptcp_get_ext(skb);
	if (!mpext || !mpext->use_map) {
		if (!subflow->map_valid && !skb->len) {
			/* the TCP stack delivers 0-len FIN pkts to the receive
			 * queue; those are the only 0-len pkts ever expected here,
			 * and we can admit no mapping only for 0-len pkts
			 */
			if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
				WARN_ONCE(1, "0len seq %d:%d flags %x",
					  TCP_SKB_CB(skb)->seq,
					  TCP_SKB_CB(skb)->end_seq,
					  TCP_SKB_CB(skb)->tcp_flags);
			sk_eat_skb(ssk, skb);
			return MAPPING_EMPTY;
		}

		if (!subflow->map_valid)
			return MAPPING_INVALID;

		goto validate_seq;
	}

	trace_get_mapping_status(mpext);

	data_len = mpext->data_len;
	if (data_len == 0) {
		pr_debug("infinite mapping received");
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
		subflow->map_data_len = 0;
		return MAPPING_INVALID;
	}

	if (mpext->data_fin == 1) {
		if (data_len == 1) {
			bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
								 mpext->dsn64);
			pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
			if (subflow->map_valid) {
				/* A DATA_FIN might arrive in a DSS
				 * option before the previous mapping
				 * has been fully consumed. Continue
				 * handling the existing mapping.
				 */
				skb_ext_del(skb, SKB_EXT_MPTCP);
				return MAPPING_OK;
			} else {
				if (updated && schedule_work(&msk->work))
					sock_hold((struct sock *)msk);

				return MAPPING_DATA_FIN;
			}
		} else {
			u64 data_fin_seq = mpext->data_seq + data_len - 1;

			/* If mpext->data_seq is a 32-bit value, data_fin_seq
			 * must also be limited to 32 bits.
			 */
			if (!mpext->dsn64)
				data_fin_seq &= GENMASK_ULL(31, 0);

			mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
			pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
				 data_fin_seq, mpext->dsn64);
		}

		/* Adjust for DATA_FIN using 1 byte of sequence space */
		data_len--;
	}

	map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64);
	WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);

	if (subflow->map_valid) {
		/* Allow replacing only with an identical map */
		if (subflow->map_seq == map_seq &&
		    subflow->map_subflow_seq == mpext->subflow_seq &&
		    subflow->map_data_len == data_len &&
		    subflow->map_csum_reqd == mpext->csum_reqd) {
			skb_ext_del(skb, SKB_EXT_MPTCP);
			goto validate_csum;
		}

		/* If this skb's data is fully covered by the current mapping,
		 * the new map would need caching, which is not supported
		 */
		if (skb_is_fully_mapped(ssk, skb)) {
			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
			return MAPPING_INVALID;
		}

		/* will validate the next map after consuming the current one */
		goto validate_csum;
	}

	subflow->map_seq = map_seq;
	subflow->map_subflow_seq = mpext->subflow_seq;
	subflow->map_data_len = data_len;
	subflow->map_valid = 1;
	subflow->map_data_fin = mpext->data_fin;
	subflow->mpc_map = mpext->mpc_map;
	subflow->map_csum_reqd = mpext->csum_reqd;
	subflow->map_csum_len = 0;
	subflow->map_data_csum = csum_unfold(mpext->csum);

	/* Cfr RFC 8684 Section 3.3.0 */
	if (unlikely(subflow->map_csum_reqd != csum_reqd))
		return MAPPING_INVALID;

	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
		 subflow->map_seq, subflow->map_subflow_seq,
		 subflow->map_data_len, subflow->map_csum_reqd,
		 subflow->map_data_csum);

validate_seq:
	/* we revalidate valid mapping on new skb, because we must ensure
	 * the current skb is completely covered by the available mapping
	 */
	if (!validate_mapping(ssk, skb)) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH);
		return MAPPING_INVALID;
	}

	skb_ext_del(skb, SKB_EXT_MPTCP);

validate_csum:
	return validate_data_csum(ssk, skb, csum_reqd);
}
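
/* Illustrative sketch of the 32-bit -> 64-bit DSN expansion performed via
 * mptcp_expand_seq() above (a simplified stand-in, not the kernel helper):
 * reuse the high word of the last known 64-bit sequence and bump it when
 * the low word appears to have wrapped.
 */
static __maybe_unused u64 expand_dsn32_sketch(u64 old_seq, u32 cur_seq32)
{
	u64 expanded = (old_seq & GENMASK_ULL(63, 32)) + cur_seq32;

	/* a large backward jump most likely means the 32-bit space wrapped */
	if ((u32)old_seq > cur_seq32 &&
	    (u32)old_seq - cur_seq32 > (u32)BIT(31))
		expanded += BIT_ULL(32);
	return expanded;
}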
static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
				       u64 limit)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
	u32 incr;

	incr = limit >= skb->len ? skb->len + fin : limit;

	pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
		 subflow->map_subflow_seq);
	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
	tcp_sk(ssk)->copied_seq += incr;
	if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
		sk_eat_skb(ssk, skb);
	if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
		subflow->map_valid = 0;
}

/* sched mptcp worker to remove the subflow if no more data is pending */
static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
{
	struct sock *sk = (struct sock *)msk;

	if (likely(ssk->sk_state != TCP_CLOSE))
		return;

	if (skb_queue_empty(&ssk->sk_receive_queue) &&
	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) {
		sock_hold(sk);
		if (!schedule_work(&msk->work))
			sock_put(sk);
	}
}
static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
{
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	if (subflow->mp_join)
		return false;
	else if (READ_ONCE(msk->csum_enabled))
		return !subflow->valid_csum_seen;
	else
		return !subflow->fully_established;
}

static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned long fail_tout;

	/* graceful failure can happen only on the MPC subflow */
	if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
		return;

	/* since the close timeout takes precedence over the fail one,
	 * no need to start the latter when the first is already set
	 */
	if (sock_flag((struct sock *)msk, SOCK_DEAD))
		return;

	/* we don't need extreme accuracy here, use a zero fail_tout as a
	 * special value meaning no fail timeout at all
	 */
	fail_tout = jiffies + TCP_RTO_MAX;
	if (!fail_tout)
		fail_tout = 1;
	WRITE_ONCE(subflow->fail_tout, fail_tout);
	tcp_send_ack(ssk);

	mptcp_reset_timeout(msk, subflow->fail_tout);
}
static bool subflow_check_data_avail(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	enum mapping_status status;
	struct mptcp_sock *msk;
	struct sk_buff *skb;

	if (!skb_peek(&ssk->sk_receive_queue))
		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
	if (subflow->data_avail)
		return true;

	msk = mptcp_sk(subflow->conn);
	for (;;) {
		u64 ack_seq;
		u64 old_ack;

		status = get_mapping_status(ssk, msk);
		trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
		if (unlikely(status == MAPPING_INVALID || status == MAPPING_DUMMY ||
			     status == MAPPING_BAD_CSUM))
			goto fallback;

		if (status != MAPPING_OK)
			goto no_data;

		skb = skb_peek(&ssk->sk_receive_queue);
		if (WARN_ON_ONCE(!skb))
			goto no_data;

		/* if msk lacks the remote key, this subflow must provide an
		 * MP_CAPABLE-based mapping
		 */
		if (unlikely(!READ_ONCE(msk->can_ack))) {
			if (!subflow->mpc_map)
				goto fallback;
			WRITE_ONCE(msk->remote_key, subflow->remote_key);
			WRITE_ONCE(msk->ack_seq, subflow->map_seq);
			WRITE_ONCE(msk->can_ack, true);
		}

		old_ack = READ_ONCE(msk->ack_seq);
		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
			 ack_seq);
		if (unlikely(before64(ack_seq, old_ack))) {
			mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
			continue;
		}

		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
		break;
	}
	return true;

no_data:
	subflow_sched_work_if_closed(msk, ssk);
	return false;

fallback:
	if (!__mptcp_check_fallback(msk)) {
		/* RFC 8684 section 3.7. */
		if (status == MAPPING_BAD_CSUM &&
		    (subflow->mp_join || subflow->valid_csum_seen)) {
			subflow->send_mp_fail = 1;

			if (!READ_ONCE(msk->allow_infinite_fallback)) {
				subflow->reset_transient = 0;
				subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
				goto reset;
			}
			mptcp_subflow_fail(msk, ssk);
			WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
			return true;
		}

		if (!subflow_can_fallback(subflow) && subflow->map_data_len) {
			/* fatal protocol error, close the socket.
			 * subflow_error_report() will introduce the appropriate barriers
			 */
			subflow->reset_transient = 0;
			subflow->reset_reason = MPTCP_RST_EMPTCP;

reset:
			ssk->sk_err = EBADMSG;
			tcp_set_state(ssk, TCP_CLOSE);
			while ((skb = skb_peek(&ssk->sk_receive_queue)))
				sk_eat_skb(ssk, skb);
			tcp_send_active_reset(ssk, GFP_ATOMIC);
			WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
			return false;
		}

		mptcp_do_fallback(ssk);
	}

	skb = skb_peek(&ssk->sk_receive_queue);
	subflow->map_valid = 1;
	subflow->map_seq = READ_ONCE(msk->ack_seq);
	subflow->map_data_len = skb->len;
	subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
	WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
	return true;
}

bool mptcp_subflow_data_available(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	/* check if current mapping is still valid */
	if (subflow->map_valid &&
	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
		subflow->map_valid = 0;
		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);

		pr_debug("Done with mapping: seq=%u data_len=%u",
			 subflow->map_subflow_seq,
			 subflow->map_data_len);
	}

	return subflow_check_data_avail(sk);
}
/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
 * not the ssk one.
 *
 * In mptcp, rwin is about the mptcp-level connection data.
 *
 * Data that is still on the ssk rx queue can thus be ignored,
 * as far as the mptcp peer is concerned that data is still inflight.
 * DSS ACK is updated when skb is moved to the mptcp rx queue.
 */
void mptcp_space(const struct sock *ssk, int *space, int *full_space)
{
	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	const struct sock *sk = subflow->conn;

	*space = __mptcp_space(sk);
	*full_space = tcp_full_space(sk);
}
void __mptcp_error_report(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		int err = sock_error(ssk);
		int ssk_state;

		if (!err)
			continue;

		/* only propagate errors on fallen-back sockets or
		 * on MPC connect
		 */
		if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
			continue;

		/* We need to propagate only transition to CLOSE state.
		 * Orphaned socket will see such state change via
		 * subflow_sched_work_if_closed() and that path will properly
		 * destroy the msk as needed.
		 */
		ssk_state = inet_sk_state_load(ssk);
		if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
			inet_sk_state_store(sk, ssk_state);
		WRITE_ONCE(sk->sk_err, -err);

		/* This barrier is coupled with smp_rmb() in mptcp_poll() */
		smp_wmb();
		sk_error_report(sk);
		break;
	}
}

static void subflow_error_report(struct sock *ssk)
{
	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

	/* bail early if this is a no-op, so that we avoid introducing a
	 * problematic lockdep dependency between TCP accept queue lock
	 * and msk socket spinlock
	 */
	if (!sk->sk_socket)
		return;

	mptcp_data_lock(sk);
	if (!sock_owned_by_user(sk))
		__mptcp_error_report(sk);
	else
		__set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->cb_flags);
	mptcp_data_unlock(sk);
}
static void subflow_data_ready(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	u16 state = 1 << inet_sk_state_load(sk);
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	msk = mptcp_sk(parent);
	if (state & TCPF_LISTEN) {
		/* MPJ subflows are removed from the accept queue before
		 * reaching here; avoid stray wakeups
		 */
		if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
			return;

		parent->sk_data_ready(parent);
		return;
	}

	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
		     !subflow->mp_join && !(state & TCPF_CLOSE));

	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
	else if (unlikely(sk->sk_err))
		subflow_error_report(sk);
}

static void subflow_write_space(struct sock *ssk)
{
	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

	mptcp_propagate_sndbuf(sk, ssk);
	mptcp_write_space(sk);
}
static const struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (sk->sk_family == AF_INET6)
		return &subflow_v6_specific;
#endif
	return &subflow_specific;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_connection_sock_af_ops *target;

	target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);

	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
		 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);

	if (likely(icsk->icsk_af_ops == target))
		return;

	subflow->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = target;
}
#endif
void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
			 struct sockaddr_storage *addr,
			 unsigned short family)
{
	memset(addr, 0, sizeof(*addr));
	addr->ss_family = family;
	if (addr->ss_family == AF_INET) {
		struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;

		if (info->family == AF_INET)
			in_addr->sin_addr = info->addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		else if (ipv6_addr_v4mapped(&info->addr6))
			in_addr->sin_addr.s_addr = info->addr6.s6_addr32[3];
#endif
		in_addr->sin_port = info->port;
	}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->ss_family == AF_INET6) {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;

		if (info->family == AF_INET)
			ipv6_addr_set_v4mapped(info->addr.s_addr,
					       &in6_addr->sin6_addr);
		else
			in6_addr->sin6_addr = info->addr6;
		in6_addr->sin6_port = info->port;
	}
#endif
}
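
/* Illustrative usage sketch for mptcp_info2sockaddr(): build a v4-mapped-v6
 * sockaddr from a v4 endpoint description. The address and port below are
 * hypothetical example values; this helper is not used by the
 * implementation.
 */
static __maybe_unused void mptcp_info2sockaddr_sketch(void)
{
	struct mptcp_addr_info info = {
		.family = AF_INET,
		.port = htons(8080),
	};
	struct sockaddr_storage addr;

	info.addr.s_addr = htonl(INADDR_LOOPBACK);

	/* with an AF_INET6 target family, the v4 address is v4-mapped */
	mptcp_info2sockaddr(&info, &addr, AF_INET6);
}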
int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
			    const struct mptcp_addr_info *remote)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_subflow_context *subflow;
	struct sockaddr_storage addr;
	int remote_id = remote->id;
	int local_id = loc->id;
	int err = -ENOTCONN;
	struct socket *sf;
	struct sock *ssk;
	u32 remote_token;
	int addrlen;
	int ifindex;
	u8 flags;

	if (!mptcp_is_fully_established(sk))
		goto err_out;

	err = mptcp_subflow_create_socket(sk, loc->family, &sf);
	if (err)
		goto err_out;

	ssk = sf->sk;
	subflow = mptcp_subflow_ctx(ssk);
	do {
		get_random_bytes(&subflow->local_nonce, sizeof(u32));
	} while (!subflow->local_nonce);

	if (local_id)
		subflow_set_local_id(subflow, local_id);

	mptcp_pm_get_flags_and_ifindex_by_id(msk, local_id,
					     &flags, &ifindex);
	subflow->remote_key = msk->remote_key;
	subflow->local_key = msk->local_key;
	subflow->token = msk->token;
	mptcp_info2sockaddr(loc, &addr, ssk->sk_family);

	addrlen = sizeof(struct sockaddr_in);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (addr.ss_family == AF_INET6)
		addrlen = sizeof(struct sockaddr_in6);
#endif
	mptcp_sockopt_sync(msk, ssk);

	ssk->sk_bound_dev_if = ifindex;
	err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
	if (err)
		goto failed;

	mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
	pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
		 remote_token, local_id, remote_id);
	subflow->remote_token = remote_token;
	subflow->remote_id = remote_id;
	subflow->request_join = 1;
	subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
	mptcp_info2sockaddr(remote, &addr, ssk->sk_family);

	sock_hold(ssk);
	list_add_tail(&subflow->node, &msk->conn_list);
	err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
	if (err && err != -EINPROGRESS)
		goto failed_unlink;

	/* discard the subflow socket */
	mptcp_sock_graft(ssk, sk->sk_socket);
	iput(SOCK_INODE(sf));
	WRITE_ONCE(msk->allow_infinite_fallback, false);
	return 0;

failed_unlink:
	list_del(&subflow->node);
	sock_put(mptcp_subflow_tcp_sock(subflow));

failed:
	subflow->disposable = 1;
	sock_release(sf);

err_out:
	/* we account subflows before creation, and these failures will not
	 * be caught by sk_state_change()
	 */
	mptcp_pm_close_subflow(msk);
	return err;
}
static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
{
#ifdef CONFIG_SOCK_CGROUP_DATA
	struct sock_cgroup_data *parent_skcd = &parent->sk_cgrp_data,
				*child_skcd = &child->sk_cgrp_data;

	/* only the additional subflows created by kworkers have to be modified */
	if (cgroup_id(sock_cgroup_ptr(parent_skcd)) !=
	    cgroup_id(sock_cgroup_ptr(child_skcd))) {
#ifdef CONFIG_MEMCG
		struct mem_cgroup *memcg = parent->sk_memcg;

		mem_cgroup_sk_free(child);
		if (memcg && css_tryget(&memcg->css))
			child->sk_memcg = memcg;
#endif /* CONFIG_MEMCG */

		cgroup_sk_free(child_skcd);
		*child_skcd = *parent_skcd;
		cgroup_sk_clone(child_skcd);
	}
#endif /* CONFIG_SOCK_CGROUP_DATA */
}
static void mptcp_subflow_ops_override(struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (ssk->sk_prot == &tcpv6_prot)
		ssk->sk_prot = &tcpv6_prot_override;
	else
#endif
		ssk->sk_prot = &tcp_prot_override;
}

static void mptcp_subflow_ops_undo_override(struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (ssk->sk_prot == &tcpv6_prot_override)
		ssk->sk_prot = &tcpv6_prot;
	else
#endif
		ssk->sk_prot = &tcp_prot;
}
int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
				struct socket **new_sock)
{
	struct mptcp_subflow_context *subflow;
	struct net *net = sock_net(sk);
	struct socket *sf;
	int err;

	/* un-accepted server sockets can reach here - on bad configuration
	 * bail early to avoid greater trouble later
	 */
	if (unlikely(!sk->sk_socket))
		return -EINVAL;

	err = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, &sf);
	if (err)
		return err;

	lock_sock_nested(sf->sk, SINGLE_DEPTH_NESTING);

	/* the newly created socket has to be in the same cgroup as its parent */
	mptcp_attach_cgroup(sk, sf->sk);

	/* kernel sockets do not by default acquire net ref, but TCP timer
	 * needs it.
	 */
	sf->sk->sk_net_refcnt = 1;
	get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL);
	sock_inuse_add(net, 1);
	err = tcp_set_ulp(sf->sk, "mptcp");
	release_sock(sf->sk);

	if (err) {
		sock_release(sf);
		return err;
	}

	/* the newly created socket really belongs to the owning MPTCP master
	 * socket, even if for additional subflows the allocation is performed
	 * by a kernel workqueue. Adjust inode references, so that the
	 * procfs/diag interfaces really show this one belonging to the correct
	 * user.
	 */
	SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
	SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
	SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;

	subflow = mptcp_subflow_ctx(sf->sk);
	pr_debug("subflow=%p", subflow);

	*new_sock = sf;
	sock_hold(sk);
	subflow->conn = sk;
	mptcp_subflow_ops_override(sf->sk);

	return 0;
}
static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
							gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;

	ctx = kzalloc(sizeof(*ctx), priority);
	if (!ctx)
		return NULL;

	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	INIT_LIST_HEAD(&ctx->node);
	INIT_LIST_HEAD(&ctx->delegated_node);

	pr_debug("subflow=%p", ctx);

	ctx->tcp_sock = sk;

	return ctx;
}

static void __subflow_state_change(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static bool subflow_is_done(const struct sock *sk)
{
	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
}
static void subflow_state_change(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	__subflow_state_change(sk);

	if (subflow_simultaneous_connect(sk)) {
		mptcp_propagate_sndbuf(parent, sk);
		mptcp_do_fallback(sk);
		mptcp_rcv_space_init(mptcp_sk(parent), sk);
		pr_fallback(mptcp_sk(parent));
		subflow->conn_finished = 1;
		mptcp_set_connected(parent);
	}

	/* as recvmsg() does not acquire the subflow socket for ssk selection
	 * a fin packet carrying a DSS can be unnoticed if we don't trigger
	 * the data available machinery here.
	 */
	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
	else if (unlikely(sk->sk_err))
		subflow_error_report(sk);

	subflow_sched_work_if_closed(mptcp_sk(parent), sk);

	if (__mptcp_check_fallback(mptcp_sk(parent)) &&
	    !subflow->rx_eof && subflow_is_done(sk)) {
		subflow->rx_eof = 1;
		mptcp_subflow_eof(parent);
	}
}
static int subflow_ulp_init(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;

	/* disallow attaching ULP to a socket unless it has been
	 * created with sock_create_kern()
	 */
	if (!sk->sk_kern_sock) {
		err = -EOPNOTSUPP;
		goto out;
	}

	ctx = subflow_create_ctx(sk, GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);

	tp->is_mptcp = 1;
	ctx->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = subflow_default_af_ops(sk);
	ctx->tcp_state_change = sk->sk_state_change;
	ctx->tcp_error_report = sk->sk_error_report;

	WARN_ON_ONCE(sk->sk_data_ready != sock_def_readable);
	WARN_ON_ONCE(sk->sk_write_space != sk_stream_write_space);

	sk->sk_data_ready = subflow_data_ready;
	sk->sk_write_space = subflow_write_space;
	sk->sk_state_change = subflow_state_change;
	sk->sk_error_report = subflow_error_report;
out:
	return err;
}
static void subflow_ulp_release(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
	bool release = true;
	struct sock *sk;

	if (!ctx)
		return;

	sk = ctx->conn;
	if (sk) {
		/* if the msk has been orphaned, keep the ctx
		 * alive, will be freed by __mptcp_close_ssk(),
		 * when the subflow is still unaccepted
		 */
		release = ctx->disposable || list_empty(&ctx->node);

		/* inet_child_forget() does not call sk_state_change(),
		 * explicitly trigger the socket close machinery
		 */
		if (!release && !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW,
						  &mptcp_sk(sk)->flags))
			mptcp_schedule_work(sk);
		sock_put(sk);
	}

	mptcp_subflow_ops_undo_override(ssk);
	if (release)
		kfree_rcu(ctx, rcu);
}
static void subflow_ulp_clone(const struct request_sock *req,
			      struct sock *newsk,
			      const gfp_t priority)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
	struct mptcp_subflow_context *new_ctx;

	if (!tcp_rsk(req)->is_mptcp ||
	    (!subflow_req->mp_capable && !subflow_req->mp_join)) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx = subflow_create_ctx(newsk, priority);
	if (!new_ctx) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx->conn_finished = 1;
	new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
	new_ctx->tcp_state_change = old_ctx->tcp_state_change;
	new_ctx->tcp_error_report = old_ctx->tcp_error_report;
	new_ctx->rel_write_seq = 1;
	new_ctx->tcp_sock = newsk;

	if (subflow_req->mp_capable) {
		/* see comments in subflow_syn_recv_sock(), MPTCP connection
		 * is fully established only after we receive the remote key
		 */
		new_ctx->mp_capable = 1;
		new_ctx->local_key = subflow_req->local_key;
		new_ctx->token = subflow_req->token;
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->idsn = subflow_req->idsn;

		/* this is the first subflow, id is always 0 */
		new_ctx->local_id_valid = 1;
	} else if (subflow_req->mp_join) {
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->mp_join = 1;
		new_ctx->fully_established = 1;
		new_ctx->backup = subflow_req->backup;
		new_ctx->remote_id = subflow_req->remote_id;
		new_ctx->token = subflow_req->token;
		new_ctx->thmac = subflow_req->thmac;

		/* the subflow req id is valid, fetched via subflow_check_req()
		 * and subflow_token_join_request()
		 */
		subflow_set_local_id(new_ctx, subflow_req->local_id);
	}
}
static void tcp_release_cb_override(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);

	if (mptcp_subflow_has_delegated_action(subflow))
		mptcp_subflow_process_delegated(ssk);

	tcp_release_cb(ssk);
}

static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
	.name		= "mptcp",
	.owner		= THIS_MODULE,
	.init		= subflow_ulp_init,
	.release	= subflow_ulp_release,
	.clone		= subflow_ulp_clone,
};
static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{
	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);

	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
					      subflow_ops->obj_size, 0,
					      SLAB_ACCOUNT |
					      SLAB_TYPESAFE_BY_RCU,
					      NULL);
	if (!subflow_ops->slab)
		return -ENOMEM;

	return 0;
}
void __init mptcp_subflow_init(void)
{
	mptcp_subflow_v4_request_sock_ops = tcp_request_sock_ops;
	mptcp_subflow_v4_request_sock_ops.slab_name = "request_sock_subflow_v4";
	mptcp_subflow_v4_request_sock_ops.destructor = subflow_v4_req_destructor;

	if (subflow_ops_init(&mptcp_subflow_v4_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow v4 request sock ops\n");

	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
	subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;

	subflow_specific = ipv4_specific;
	subflow_specific.conn_request = subflow_v4_conn_request;
	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_specific.sk_rx_dst_set = subflow_finish_connect;
	subflow_specific.rebuild_header = subflow_rebuild_header;

	tcp_prot_override = tcp_prot;
	tcp_prot_override.release_cb = tcp_release_cb_override;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	/* In struct mptcp_subflow_request_sock, we assume the TCP request sock
	 * structures for v4 and v6 have the same size. It should not change in
	 * the future, but better to make sure we are warned if it is no longer
	 * the case.
	 */
	BUILD_BUG_ON(sizeof(struct tcp_request_sock) != sizeof(struct tcp6_request_sock));

	mptcp_subflow_v6_request_sock_ops = tcp6_request_sock_ops;
	mptcp_subflow_v6_request_sock_ops.slab_name = "request_sock_subflow_v6";
	mptcp_subflow_v6_request_sock_ops.destructor = subflow_v6_req_destructor;

	if (subflow_ops_init(&mptcp_subflow_v6_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow v6 request sock ops\n");

	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
	subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;

	subflow_v6_specific = ipv6_specific;
	subflow_v6_specific.conn_request = subflow_v6_conn_request;
	subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
	subflow_v6_specific.rebuild_header = subflow_v6_rebuild_header;

	subflow_v6m_specific = subflow_v6_specific;
	subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
	subflow_v6m_specific.send_check = ipv4_specific.send_check;
	subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
	subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
	subflow_v6m_specific.net_frag_header_len = 0;
	subflow_v6m_specific.rebuild_header = subflow_rebuild_header;

	tcpv6_prot_override = tcpv6_prot;
	tcpv6_prot_override.release_cb = tcp_release_cb_override;
#endif

	mptcp_diag_subflow_init(&subflow_ulp_ops);

	if (tcp_register_ulp(&subflow_ulp_ops) != 0)
		panic("MPTCP: failed to register subflows to ULP\n");
}