net/mptcp/subflow.c
// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/algapi.h>
#include <crypto/sha2.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"

#include <trace/events/mptcp.h>

static void mptcp_subflow_ops_undo_override(struct sock *ssk);

static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
                                  enum linux_mptcp_mib_field field)
{
        MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
}

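/* Tear down an MPTCP request socket: drop the msk reference an MP_JOIN
 * request may hold, release the token and chain to the plain TCP destructor.
 */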
static void subflow_req_destructor(struct request_sock *req)
{
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

        pr_debug("subflow_req=%p", subflow_req);

        if (subflow_req->msk)
                sock_put((struct sock *)subflow_req->msk);

        mptcp_token_destroy_request(req);
        tcp_request_sock_ops.destructor(req);
}

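/* HMAC the two nonces with the given key pair; this is the authentication
 * material exchanged by the MP_JOIN handshake.
 */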
static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
                                  void *hmac)
{
        u8 msg[8];

        put_unaligned_be32(nonce1, &msg[0]);
        put_unaligned_be32(nonce2, &msg[4]);

        mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}

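/* New subflows are accepted only if the connection is fully established and
 * the path manager - in-kernel or userspace - currently allows them.
 */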
static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
{
        return mptcp_is_fully_established((void *)msk) &&
                ((mptcp_pm_is_userspace(msk) &&
                  mptcp_userspace_pm_active(msk)) ||
                 READ_ONCE(msk->pm.accept_subflow));
}

/* validate received token and create truncated hmac and nonce for SYN-ACK */
static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_req)
{
        struct mptcp_sock *msk = subflow_req->msk;
        u8 hmac[SHA256_DIGEST_SIZE];

        get_random_bytes(&subflow_req->local_nonce, sizeof(u32));

        subflow_generate_hmac(msk->local_key, msk->remote_key,
                              subflow_req->local_nonce,
                              subflow_req->remote_nonce, hmac);

        subflow_req->thmac = get_unaligned_be64(hmac);
}

static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
{
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
        struct mptcp_sock *msk;
        int local_id;

        msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
        if (!msk) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
                return NULL;
        }

        local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
        if (local_id < 0) {
                sock_put((struct sock *)msk);
                return NULL;
        }
        subflow_req->local_id = local_id;

        return msk;
}

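/* Bring the MPTCP-specific fields of the request socket to a known state
 * before the incoming options are parsed.
 */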
static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
{
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

        subflow_req->mp_capable = 0;
        subflow_req->mp_join = 0;
        subflow_req->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk_listener));
        subflow_req->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk_listener));
        subflow_req->msk = NULL;
        mptcp_token_init_request(req);
}

static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
{
        return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
}

static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason)
{
        struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP);

        if (mpext) {
                memset(mpext, 0, sizeof(*mpext));
                mpext->reset_reason = reason;
        }
}

/* Init mptcp request socket.
 *
 * Returns an error code if a JOIN has failed and a TCP reset
 * should be sent.
 */
static int subflow_check_req(struct request_sock *req,
                             const struct sock *sk_listener,
                             struct sk_buff *skb)
{
        struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
        struct mptcp_options_received mp_opt;
        bool opt_mp_capable, opt_mp_join;

        pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

#ifdef CONFIG_TCP_MD5SIG
        /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
         * TCP option space.
         */
        if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
                return -EINVAL;
#endif

        mptcp_get_options(skb, &mp_opt);

        opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
        opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
        if (opt_mp_capable) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

                if (opt_mp_join)
                        return 0;
        } else if (opt_mp_join) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
        }

        if (opt_mp_capable && listener->request_mptcp) {
                int err, retries = MPTCP_TOKEN_MAX_RETRIES;

                subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
again:
                do {
                        get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key));
                } while (subflow_req->local_key == 0);

                if (unlikely(req->syncookie)) {
                        mptcp_crypto_key_sha(subflow_req->local_key,
                                             &subflow_req->token,
                                             &subflow_req->idsn);
                        if (mptcp_token_exists(subflow_req->token)) {
                                if (retries-- > 0)
                                        goto again;
                                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);
                        } else {
                                subflow_req->mp_capable = 1;
                        }
                        return 0;
                }

                err = mptcp_token_new_request(req);
                if (err == 0)
                        subflow_req->mp_capable = 1;
                else if (retries-- > 0)
                        goto again;
                else
                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);

        } else if (opt_mp_join && listener->request_mptcp) {
                subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
                subflow_req->mp_join = 1;
                subflow_req->backup = mp_opt.backup;
                subflow_req->remote_id = mp_opt.join_id;
                subflow_req->token = mp_opt.token;
                subflow_req->remote_nonce = mp_opt.nonce;
                subflow_req->msk = subflow_token_join_request(req);

                /* Can't fall back to TCP in this case. */
                if (!subflow_req->msk) {
                        subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
                        return -EPERM;
                }

                if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
                        pr_debug("syn inet_sport=%d %d",
                                 ntohs(inet_sk(sk_listener)->inet_sport),
                                 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
                        if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
                                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
                                return -EPERM;
                        }
                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX);
                }

                subflow_req_create_thmac(subflow_req);

                if (unlikely(req->syncookie)) {
                        if (mptcp_can_accept_new_subflow(subflow_req->msk))
                                subflow_init_req_cookie_join_save(subflow_req, skb);
                        else
                                return -EPERM;
                }

                pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
                         subflow_req->remote_nonce, subflow_req->msk);
        }

        return 0;
}

int mptcp_subflow_init_cookie_req(struct request_sock *req,
                                  const struct sock *sk_listener,
                                  struct sk_buff *skb)
{
        struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
        struct mptcp_options_received mp_opt;
        bool opt_mp_capable, opt_mp_join;
        int err;

        subflow_init_req(req, sk_listener);
        mptcp_get_options(skb, &mp_opt);

        opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
        opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
        if (opt_mp_capable && opt_mp_join)
                return -EINVAL;

        if (opt_mp_capable && listener->request_mptcp) {
                if (mp_opt.sndr_key == 0)
                        return -EINVAL;

                subflow_req->local_key = mp_opt.rcvr_key;
                err = mptcp_token_new_request(req);
                if (err)
                        return err;

                subflow_req->mp_capable = 1;
                subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
        } else if (opt_mp_join && listener->request_mptcp) {
                if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
                        return -EINVAL;

                subflow_req->mp_join = 1;
                subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);

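/* route_req hook: mark the request as MPTCP-aware, route it via the plain
 * TCP handler and validate the MPTCP options, resetting the peer on
 * failure unless syncookies are in use.
 */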
static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
                                              struct sk_buff *skb,
                                              struct flowi *fl,
                                              struct request_sock *req)
{
        struct dst_entry *dst;
        int err;

        tcp_rsk(req)->is_mptcp = 1;
        subflow_init_req(req, sk);

        dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req);
        if (!dst)
                return NULL;

        err = subflow_check_req(req, sk, skb);
        if (err == 0)
                return dst;

        dst_release(dst);
        if (!req->syncookie)
                tcp_request_sock_ops.send_reset(sk, skb);
        return NULL;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
                                              struct sk_buff *skb,
                                              struct flowi *fl,
                                              struct request_sock *req)
{
        struct dst_entry *dst;
        int err;

        tcp_rsk(req)->is_mptcp = 1;
        subflow_init_req(req, sk);

        dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req);
        if (!dst)
                return NULL;

        err = subflow_check_req(req, sk, skb);
        if (err == 0)
                return dst;

        dst_release(dst);
        if (!req->syncookie)
                tcp6_request_sock_ops.send_reset(sk, skb);
        return NULL;
}
#endif

/* validate received truncated hmac and create hmac for third ACK */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{
        u8 hmac[SHA256_DIGEST_SIZE];
        u64 thmac;

        subflow_generate_hmac(subflow->remote_key, subflow->local_key,
                              subflow->remote_nonce, subflow->local_nonce,
                              hmac);

        thmac = get_unaligned_be64(hmac);
        pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
                 subflow, subflow->token, thmac, subflow->thmac);

        return thmac == subflow->thmac;
}

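/* Abort the subflow with an active reset; the disposal of the parent msk
 * is deferred to the mptcp worker when possible.
 */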
void mptcp_subflow_reset(struct sock *ssk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        struct sock *sk = subflow->conn;

        /* must hold: tcp_done() could drop last reference on parent */
        sock_hold(sk);

        tcp_set_state(ssk, TCP_CLOSE);
        tcp_send_active_reset(ssk, GFP_ATOMIC);
        tcp_done(ssk);
        if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
            schedule_work(&mptcp_sk(sk)->work))
                return; /* worker will put sk for us */

        sock_put(sk);
}

static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
{
        return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
}

void __mptcp_set_connected(struct sock *sk)
{
        if (sk->sk_state == TCP_SYN_SENT) {
                inet_sk_state_store(sk, TCP_ESTABLISHED);
                sk->sk_state_change(sk);
        }
}

static void mptcp_set_connected(struct sock *sk)
{
        mptcp_data_lock(sk);
        if (!sock_owned_by_user(sk))
                __mptcp_set_connected(sk);
        else
                __set_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->cb_flags);
        mptcp_data_unlock(sk);
}

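/* Invoked when the TCP-level handshake of a subflow completes: parse the
 * SYN-ACK options and finalize the MP_CAPABLE or MP_JOIN handshake,
 * falling back to plain TCP when the expected options are missing.
 */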
static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct mptcp_options_received mp_opt;
        struct sock *parent = subflow->conn;

        subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

        /* be sure no special action on any packet other than syn-ack */
        if (subflow->conn_finished)
                return;

        mptcp_propagate_sndbuf(parent, sk);
        subflow->rel_write_seq = 1;
        subflow->conn_finished = 1;
        subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
        pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);

        mptcp_get_options(skb, &mp_opt);
        if (subflow->request_mptcp) {
                if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) {
                        MPTCP_INC_STATS(sock_net(sk),
                                        MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
                        mptcp_do_fallback(sk);
                        pr_fallback(mptcp_sk(subflow->conn));
                        goto fallback;
                }

                if (mp_opt.suboptions & OPTION_MPTCP_CSUMREQD)
                        WRITE_ONCE(mptcp_sk(parent)->csum_enabled, true);
                if (mp_opt.deny_join_id0)
                        WRITE_ONCE(mptcp_sk(parent)->pm.remote_deny_join_id0, true);
                subflow->mp_capable = 1;
                subflow->can_ack = 1;
                subflow->remote_key = mp_opt.sndr_key;
                pr_debug("subflow=%p, remote_key=%llu", subflow,
                         subflow->remote_key);
                MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
                mptcp_finish_connect(sk);
                mptcp_set_connected(parent);
        } else if (subflow->request_join) {
                u8 hmac[SHA256_DIGEST_SIZE];

                if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ)) {
                        subflow->reset_reason = MPTCP_RST_EMPTCP;
                        goto do_reset;
                }

                subflow->backup = mp_opt.backup;
                subflow->thmac = mp_opt.thmac;
                subflow->remote_nonce = mp_opt.nonce;
                subflow->remote_id = mp_opt.join_id;
                pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
                         subflow, subflow->thmac, subflow->remote_nonce,
                         subflow->backup);

                if (!subflow_thmac_valid(subflow)) {
                        MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
                        subflow->reset_reason = MPTCP_RST_EMPTCP;
                        goto do_reset;
                }

                if (!mptcp_finish_join(sk))
                        goto do_reset;

                subflow_generate_hmac(subflow->local_key, subflow->remote_key,
                                      subflow->local_nonce,
                                      subflow->remote_nonce,
                                      hmac);
                memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);

                subflow->mp_join = 1;
                MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);

                if (subflow_use_different_dport(mptcp_sk(parent), sk)) {
                        pr_debug("synack inet_dport=%d %d",
                                 ntohs(inet_sk(sk)->inet_dport),
                                 ntohs(inet_sk(parent)->inet_dport));
                        MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
                }
        } else if (mptcp_check_fallback(sk)) {
fallback:
                mptcp_rcv_space_init(mptcp_sk(parent), sk);
                mptcp_set_connected(parent);
        }
        return;

do_reset:
        subflow->reset_transient = 0;
        mptcp_subflow_reset(sk);
}

static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id)
{
        subflow->local_id = local_id;
        subflow->local_id_valid = 1;
}

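/* Ensure the subflow carries a valid local address id, asking the path
 * manager for one on first use.
 */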
static int subflow_chk_local_id(struct sock *sk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct mptcp_sock *msk = mptcp_sk(subflow->conn);
        int err;

        if (likely(subflow->local_id_valid))
                return 0;

        err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
        if (err < 0)
                return err;

        subflow_set_local_id(subflow, err);
        return 0;
}

static int subflow_rebuild_header(struct sock *sk)
{
        int err = subflow_chk_local_id(sk);

        if (unlikely(err < 0))
                return err;

        return inet_sk_rebuild_header(sk);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static int subflow_v6_rebuild_header(struct sock *sk)
{
        int err = subflow_chk_local_id(sk);

        if (unlikely(err < 0))
                return err;

        return inet6_sk_rebuild_header(sk);
}
#endif

struct request_sock_ops mptcp_subflow_request_sock_ops;
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

        pr_debug("subflow=%p", subflow);

        /* Never answer SYNs sent to broadcast or multicast addresses */
        if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
                goto drop;

        return tcp_conn_request(&mptcp_subflow_request_sock_ops,
                                &subflow_request_sock_ipv4_ops,
                                sk, skb);
drop:
        tcp_listendrop(sk);
        return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
static struct proto tcpv6_prot_override;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

        pr_debug("subflow=%p", subflow);

        if (skb->protocol == htons(ETH_P_IP))
                return subflow_v4_conn_request(sk, skb);

        if (!ipv6_unicast_destination(skb))
                goto drop;

        if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
                __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
                return 0;
        }

        return tcp_conn_request(&mptcp_subflow_request_sock_ops,
                                &subflow_request_sock_ipv6_ops, sk, skb);

drop:
        tcp_listendrop(sk);
        return 0; /* don't send reset */
}
#endif

/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
                               const struct mptcp_options_received *mp_opt)
{
        const struct mptcp_subflow_request_sock *subflow_req;
        u8 hmac[SHA256_DIGEST_SIZE];
        struct mptcp_sock *msk;

        subflow_req = mptcp_subflow_rsk(req);
        msk = subflow_req->msk;
        if (!msk)
                return false;

        subflow_generate_hmac(msk->remote_key, msk->local_key,
                              subflow_req->remote_nonce,
                              subflow_req->local_nonce, hmac);

        return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}

static void mptcp_force_close(struct sock *sk)
{
        /* the msk is not yet exposed to user-space */
        inet_sk_state_store(sk, TCP_CLOSE);
        sk_common_release(sk);
}

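/* Detach the MPTCP ULP context from the socket, restoring the plain TCP
 * behaviour overridden at subflow creation time.
 */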
static void subflow_ulp_fallback(struct sock *sk,
                                 struct mptcp_subflow_context *old_ctx)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        mptcp_subflow_tcp_fallback(sk, old_ctx);
        icsk->icsk_ulp_ops = NULL;
        rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
        tcp_sk(sk)->is_mptcp = 0;

        mptcp_subflow_ops_undo_override(sk);
}

static void subflow_drop_ctx(struct sock *ssk)
{
        struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

        if (!ctx)
                return;

        subflow_ulp_fallback(ssk, ctx);
        if (ctx->conn)
                sock_put(ctx->conn);

        kfree_rcu(ctx, rcu);
}

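/* Record the peer key and mark the subflow and the owning msk as fully
 * established.
 */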
void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
                                     struct mptcp_options_received *mp_opt)
{
        struct mptcp_sock *msk = mptcp_sk(subflow->conn);

        subflow->remote_key = mp_opt->sndr_key;
        subflow->fully_established = 1;
        subflow->can_ack = 1;
        WRITE_ONCE(msk->fully_established, true);
}

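/* syn_recv_sock hook: create the child socket for an incoming request and,
 * depending on the request type, attach it to a newly cloned msk
 * (MP_CAPABLE), join an existing one (MP_JOIN), or fall back to plain TCP.
 */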
static struct sock *subflow_syn_recv_sock(const struct sock *sk,
                                          struct sk_buff *skb,
                                          struct request_sock *req,
                                          struct dst_entry *dst,
                                          struct request_sock *req_unhash,
                                          bool *own_req)
{
        struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
        struct mptcp_subflow_request_sock *subflow_req;
        struct mptcp_options_received mp_opt;
        bool fallback, fallback_is_fatal;
        struct sock *new_msk = NULL;
        struct sock *child;

        pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

        /* After child creation we must look for MPC even when options
         * are not parsed
         */
        mp_opt.suboptions = 0;

        /* hopefully temporary handling for MP_JOIN+syncookie */
        subflow_req = mptcp_subflow_rsk(req);
        fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
        fallback = !tcp_rsk(req)->is_mptcp;
        if (fallback)
                goto create_child;

        /* if the sk is MP_CAPABLE, we try to fetch the client key */
        if (subflow_req->mp_capable) {
                /* we can receive and accept an in-window, out-of-order pkt,
                 * which may not carry the MP_CAPABLE opt even on mptcp enabled
                 * paths: always try to extract the peer key, and fallback
                 * for packets missing it.
                 * Even OoO DSS packets coming legitimately after dropped or
                 * reordered MPC will cause fallback, but we don't have other
                 * options.
                 */
                mptcp_get_options(skb, &mp_opt);
                if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) {
                        fallback = true;
                        goto create_child;
                }

                new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
                if (!new_msk)
                        fallback = true;
        } else if (subflow_req->mp_join) {
                mptcp_get_options(skb, &mp_opt);
                if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ) ||
                    !subflow_hmac_valid(req, &mp_opt) ||
                    !mptcp_can_accept_new_subflow(subflow_req->msk)) {
                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
                        fallback = true;
                }
        }

create_child:
        child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
                                                     req_unhash, own_req);

        if (child && *own_req) {
                struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

                tcp_rsk(req)->drop_req = false;

                /* we need to fall back on ctx allocation failure and on pre-reqs
                 * checking above. In the latter scenario we additionally need
                 * to reset the context to non MPTCP status.
                 */
                if (!ctx || fallback) {
                        if (fallback_is_fatal) {
                                subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
                                goto dispose_child;
                        }

                        if (new_msk)
                                mptcp_copy_inaddrs(new_msk, child);
                        subflow_drop_ctx(child);
                        goto out;
                }

                /* ssk inherits options of listener sk */
                ctx->setsockopt_seq = listener->setsockopt_seq;

                if (ctx->mp_capable) {
                        /* this can't race with mptcp_close(), as the msk is
                         * not yet exposed to user-space
                         */
                        inet_sk_state_store((void *)new_msk, TCP_ESTABLISHED);

                        /* record the newly created socket as the first msk
                         * subflow, but don't link it yet into conn_list
                         */
                        WRITE_ONCE(mptcp_sk(new_msk)->first, child);

                        /* new mpc subflow takes ownership of the newly
                         * created mptcp socket
                         */
                        mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq;
                        mptcp_pm_new_connection(mptcp_sk(new_msk), child, 1);
                        mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
                        ctx->conn = new_msk;
                        new_msk = NULL;

                        /* set msk addresses early to ensure mptcp_pm_get_local_id()
                         * uses the correct data
                         */
                        mptcp_copy_inaddrs(ctx->conn, child);

                        /* with OoO packets we can reach here without ingress
                         * mpc option
                         */
                        if (mp_opt.suboptions & OPTIONS_MPTCP_MPC)
                                mptcp_subflow_fully_established(ctx, &mp_opt);
                } else if (ctx->mp_join) {
                        struct mptcp_sock *owner;

                        owner = subflow_req->msk;
                        if (!owner) {
                                subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
                                goto dispose_child;
                        }

                        /* move the msk reference ownership to the subflow */
                        subflow_req->msk = NULL;
                        ctx->conn = (struct sock *)owner;

                        if (subflow_use_different_sport(owner, sk)) {
                                pr_debug("ack inet_sport=%d %d",
                                         ntohs(inet_sk(sk)->inet_sport),
                                         ntohs(inet_sk((struct sock *)owner)->inet_sport));
                                if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
                                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX);
                                        goto dispose_child;
                                }
                                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX);
                        }

                        if (!mptcp_finish_join(child))
                                goto dispose_child;

                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
                        tcp_rsk(req)->drop_req = true;
                }
        }

out:
        /* dispose of the leftover mptcp master, if any */
        if (unlikely(new_msk))
                mptcp_force_close(new_msk);

        /* check for expected invariant - should never trigger, just help
         * catching earlier subtle bugs
         */
        WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
                     (!mptcp_subflow_ctx(child) ||
                      !mptcp_subflow_ctx(child)->conn));
        return child;

dispose_child:
        subflow_drop_ctx(child);
        tcp_rsk(req)->drop_req = true;
        inet_csk_prepare_for_destroy_sock(child);
        tcp_done(child);
        req->rsk_ops->send_reset(sk, skb);

        /* The last child reference will be released by the caller */
        return child;
}

static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;
static struct proto tcp_prot_override;

enum mapping_status {
        MAPPING_OK,
        MAPPING_INVALID,
        MAPPING_EMPTY,
        MAPPING_DATA_FIN,
        MAPPING_DUMMY,
        MAPPING_BAD_CSUM
};

static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
        pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
                 ssn, subflow->map_subflow_seq, subflow->map_data_len);
}

static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        unsigned int skb_consumed;

        skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
        if (WARN_ON_ONCE(skb_consumed >= skb->len))
                return true;

        return skb->len - skb_consumed <= subflow->map_data_len -
                                          mptcp_subflow_get_map_offset(subflow);
}

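/* Check that the data at the current subflow sequence number falls inside
 * the active DSS mapping.
 */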
static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

        if (unlikely(before(ssn, subflow->map_subflow_seq))) {
                /* Mapping covers data later in the subflow stream,
                 * currently unsupported.
                 */
                dbg_bad_map(subflow, ssn);
                return false;
        }
        if (unlikely(!before(ssn, subflow->map_subflow_seq +
                                  subflow->map_data_len))) {
                /* Mapping covers only past subflow data, invalid */
                dbg_bad_map(subflow, ssn);
                return false;
        }
        return true;
}

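/* With DSS checksums enabled, walk the receive queue accumulating the
 * checksum over the whole mapping and verify it against the value carried
 * by the DSS option.
 */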
static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
                                              bool csum_reqd)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        u32 offset, seq, delta;
        __sum16 csum;
        int len;

        if (!csum_reqd)
                return MAPPING_OK;

        /* mapping already validated on previous traversal */
        if (subflow->map_csum_len == subflow->map_data_len)
                return MAPPING_OK;

        /* traverse the receive queue, ensuring it contains a full
         * DSS mapping and accumulating the related csum.
         * Preserve the accumulated csum across multiple calls, to compute
         * the csum only once
         */
        delta = subflow->map_data_len - subflow->map_csum_len;
        for (;;) {
                seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len;
                offset = seq - TCP_SKB_CB(skb)->seq;

                /* if the current skb has not been accounted yet, csum its contents
                 * up to the amount covered by the current DSS
                 */
                if (offset < skb->len) {
                        __wsum csum;

                        len = min(skb->len - offset, delta);
                        csum = skb_checksum(skb, offset, len, 0);
                        subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum,
                                                                subflow->map_csum_len);

                        delta -= len;
                        subflow->map_csum_len += len;
                }
                if (delta == 0)
                        break;

                if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) {
                        /* if this subflow is closed, the partial mapping
                         * will never be completed; flush the pending skbs, so
                         * that subflow_sched_work_if_closed() can kick in
                         */
                        if (unlikely(ssk->sk_state == TCP_CLOSE))
                                while ((skb = skb_peek(&ssk->sk_receive_queue)))
                                        sk_eat_skb(ssk, skb);

                        /* not enough data to validate the csum */
                        return MAPPING_EMPTY;
                }

                /* the DSS mapping for the next skbs will be validated later,
                 * when a get_mapping_status() call processes them
                 */
                skb = skb->next;
        }

        /* note that 'map_data_len' accounts only for the carried data, it does
         * not include the possible seq increment due to the data fin,
         * while the pseudo header requires the original DSS data len,
         * including that
         */
        csum = __mptcp_make_csum(subflow->map_seq,
                                 subflow->map_subflow_seq,
                                 subflow->map_data_len + subflow->map_data_fin,
                                 subflow->map_data_csum);
        if (unlikely(csum)) {
                MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
                return MAPPING_BAD_CSUM;
        }

        subflow->valid_csum_seen = 1;
        return MAPPING_OK;
}

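/* Classify the DSS mapping covering the skb at the head of the subflow
 * receive queue, installing a new mapping when needed.
 */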
static enum mapping_status get_mapping_status(struct sock *ssk,
                                              struct mptcp_sock *msk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        bool csum_reqd = READ_ONCE(msk->csum_enabled);
        struct mptcp_ext *mpext;
        struct sk_buff *skb;
        u16 data_len;
        u64 map_seq;

        skb = skb_peek(&ssk->sk_receive_queue);
        if (!skb)
                return MAPPING_EMPTY;

        if (mptcp_check_fallback(ssk))
                return MAPPING_DUMMY;

        mpext = mptcp_get_ext(skb);
        if (!mpext || !mpext->use_map) {
                if (!subflow->map_valid && !skb->len) {
                        /* the TCP stack delivers 0 len FIN pkts to the receive
                         * queue, those are the only 0 len pkts ever expected here,
                         * and we can admit no mapping only for 0 len pkts
                         */
                        if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
                                WARN_ONCE(1, "0len seq %d:%d flags %x",
                                          TCP_SKB_CB(skb)->seq,
                                          TCP_SKB_CB(skb)->end_seq,
                                          TCP_SKB_CB(skb)->tcp_flags);
                        sk_eat_skb(ssk, skb);
                        return MAPPING_EMPTY;
                }

                if (!subflow->map_valid)
                        return MAPPING_INVALID;

                goto validate_seq;
        }

        trace_get_mapping_status(mpext);

        data_len = mpext->data_len;
        if (data_len == 0) {
                pr_debug("infinite mapping received");
                MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
                subflow->map_data_len = 0;
                return MAPPING_INVALID;
        }

        if (mpext->data_fin == 1) {
                if (data_len == 1) {
                        bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
                                                                 mpext->dsn64);
                        pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
                        if (subflow->map_valid) {
                                /* A DATA_FIN might arrive in a DSS
                                 * option before the previous mapping
                                 * has been fully consumed. Continue
                                 * handling the existing mapping.
                                 */
                                skb_ext_del(skb, SKB_EXT_MPTCP);
                                return MAPPING_OK;
                        } else {
                                if (updated && schedule_work(&msk->work))
                                        sock_hold((struct sock *)msk);

                                return MAPPING_DATA_FIN;
                        }
                } else {
                        u64 data_fin_seq = mpext->data_seq + data_len - 1;

                        /* If mpext->data_seq is a 32-bit value, data_fin_seq
                         * must also be limited to 32 bits.
                         */
                        if (!mpext->dsn64)
                                data_fin_seq &= GENMASK_ULL(31, 0);

                        mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
                        pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
                                 data_fin_seq, mpext->dsn64);
                }

                /* Adjust for DATA_FIN using 1 byte of sequence space */
                data_len--;
        }

        map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64);
        WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);

        if (subflow->map_valid) {
                /* Allow replacing only with an identical map */
                if (subflow->map_seq == map_seq &&
                    subflow->map_subflow_seq == mpext->subflow_seq &&
                    subflow->map_data_len == data_len &&
                    subflow->map_csum_reqd == mpext->csum_reqd) {
                        skb_ext_del(skb, SKB_EXT_MPTCP);
                        goto validate_csum;
                }

                /* If this skb's data is fully covered by the current mapping,
                 * the new map would need caching, which is not supported
                 */
                if (skb_is_fully_mapped(ssk, skb)) {
                        MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
                        return MAPPING_INVALID;
                }

                /* will validate the next map after consuming the current one */
                goto validate_csum;
        }

        subflow->map_seq = map_seq;
        subflow->map_subflow_seq = mpext->subflow_seq;
        subflow->map_data_len = data_len;
        subflow->map_valid = 1;
        subflow->map_data_fin = mpext->data_fin;
        subflow->mpc_map = mpext->mpc_map;
        subflow->map_csum_reqd = mpext->csum_reqd;
        subflow->map_csum_len = 0;
        subflow->map_data_csum = csum_unfold(mpext->csum);

        /* Cf. RFC 8684 Section 3.3 */
        if (unlikely(subflow->map_csum_reqd != csum_reqd))
                return MAPPING_INVALID;

        pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
                 subflow->map_seq, subflow->map_subflow_seq,
                 subflow->map_data_len, subflow->map_csum_reqd,
                 subflow->map_data_csum);

validate_seq:
        /* we revalidate a valid mapping on each new skb, because we must ensure
         * the current skb is completely covered by the available mapping
         */
        if (!validate_mapping(ssk, skb)) {
                MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH);
                return MAPPING_INVALID;
        }

        skb_ext_del(skb, SKB_EXT_MPTCP);

validate_csum:
        return validate_data_csum(ssk, skb, csum_reqd);
}

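/* Drop data already acked at the MPTCP level from the subflow receive
 * queue, invalidating the current mapping once it is fully consumed.
 */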
static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
                                       u64 limit)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
        u32 incr;

        incr = limit >= skb->len ? skb->len + fin : limit;

        pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
                 subflow->map_subflow_seq);
        MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
        tcp_sk(ssk)->copied_seq += incr;
        if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
                sk_eat_skb(ssk, skb);
        if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
                subflow->map_valid = 0;
}

/* sched mptcp worker to remove the subflow if no more data is pending */
static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
{
        struct sock *sk = (struct sock *)msk;

        if (likely(ssk->sk_state != TCP_CLOSE))
                return;

        if (skb_queue_empty(&ssk->sk_receive_queue) &&
            !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) {
                sock_hold(sk);
                if (!schedule_work(&msk->work))
                        sock_put(sk);
        }
}

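/* Fallback is never allowed for MP_JOIN subflows, and for the first subflow
 * only before full establishment - or, with checksums enabled, before the
 * first valid checksum is observed.
 */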
static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
{
        struct mptcp_sock *msk = mptcp_sk(subflow->conn);

        if (subflow->mp_join)
                return false;
        else if (READ_ONCE(msk->csum_enabled))
                return !subflow->valid_csum_seen;
        else
                return !subflow->fully_established;
}

static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        unsigned long fail_tout;

        /* graceful failure can happen only on the MPC subflow */
        if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
                return;

        /* since the close timeout takes precedence over the fail one,
         * no need to start the latter when the first is already set
         */
        if (sock_flag((struct sock *)msk, SOCK_DEAD))
                return;

        /* we don't need extreme accuracy here; a zero fail_tout is the
         * special value meaning no fail timeout at all, so avoid it
         */
        fail_tout = jiffies + TCP_RTO_MAX;
        if (!fail_tout)
                fail_tout = 1;
        WRITE_ONCE(subflow->fail_tout, fail_tout);
        tcp_send_ack(ssk);

        mptcp_reset_timeout(msk, subflow->fail_tout);
}

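/* Update the subflow data_avail status, validating the DSS mappings and
 * handling protocol errors, fallback and MP_FAIL as needed.
 */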
static bool subflow_check_data_avail(struct sock *ssk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        enum mapping_status status;
        struct mptcp_sock *msk;
        struct sk_buff *skb;

        if (!skb_peek(&ssk->sk_receive_queue))
                WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
        if (subflow->data_avail)
                return true;

        msk = mptcp_sk(subflow->conn);
        for (;;) {
                u64 ack_seq;
                u64 old_ack;

                status = get_mapping_status(ssk, msk);
                trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
                if (unlikely(status == MAPPING_INVALID || status == MAPPING_DUMMY ||
                             status == MAPPING_BAD_CSUM))
                        goto fallback;

                if (status != MAPPING_OK)
                        goto no_data;

                skb = skb_peek(&ssk->sk_receive_queue);
                if (WARN_ON_ONCE(!skb))
                        goto no_data;

                /* if msk lacks the remote key, this subflow must provide an
                 * MP_CAPABLE-based mapping
                 */
                if (unlikely(!READ_ONCE(msk->can_ack))) {
                        if (!subflow->mpc_map)
                                goto fallback;
                        WRITE_ONCE(msk->remote_key, subflow->remote_key);
                        WRITE_ONCE(msk->ack_seq, subflow->map_seq);
                        WRITE_ONCE(msk->can_ack, true);
                }

                old_ack = READ_ONCE(msk->ack_seq);
                ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
                pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
                         ack_seq);
                if (unlikely(before64(ack_seq, old_ack))) {
                        mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
                        continue;
                }

                WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
                break;
        }
        return true;

no_data:
        subflow_sched_work_if_closed(msk, ssk);
        return false;

fallback:
        if (!__mptcp_check_fallback(msk)) {
                /* RFC 8684 section 3.7. */
                if (status == MAPPING_BAD_CSUM &&
                    (subflow->mp_join || subflow->valid_csum_seen)) {
                        subflow->send_mp_fail = 1;

                        if (!READ_ONCE(msk->allow_infinite_fallback)) {
                                subflow->reset_transient = 0;
                                subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
                                goto reset;
                        }
                        mptcp_subflow_fail(msk, ssk);
                        WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
                        return true;
                }

                if (!subflow_can_fallback(subflow) && subflow->map_data_len) {
                        /* fatal protocol error, close the socket.
                         * subflow_error_report() will introduce the appropriate barriers
                         */
                        subflow->reset_transient = 0;
                        subflow->reset_reason = MPTCP_RST_EMPTCP;

reset:
                        ssk->sk_err = EBADMSG;
                        tcp_set_state(ssk, TCP_CLOSE);
                        while ((skb = skb_peek(&ssk->sk_receive_queue)))
                                sk_eat_skb(ssk, skb);
                        tcp_send_active_reset(ssk, GFP_ATOMIC);
                        WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
                        return false;
                }

                mptcp_do_fallback(ssk);
        }

        skb = skb_peek(&ssk->sk_receive_queue);
        subflow->map_valid = 1;
        subflow->map_seq = READ_ONCE(msk->ack_seq);
        subflow->map_data_len = skb->len;
        subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
        WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
        return true;
}

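/* Return true when in-sequence data is available on the subflow, dropping
 * the current mapping first if it has been fully consumed.
 */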
bool mptcp_subflow_data_available(struct sock *sk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

        /* check if current mapping is still valid */
        if (subflow->map_valid &&
            mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
                subflow->map_valid = 0;
                WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);

                pr_debug("Done with mapping: seq=%u data_len=%u",
                         subflow->map_subflow_seq,
                         subflow->map_data_len);
        }

        return subflow_check_data_avail(sk);
}

/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
 * not the ssk one.
 *
 * In mptcp, rwin is about the mptcp-level connection data.
 *
 * Data that is still on the ssk rx queue can thus be ignored:
 * as far as the mptcp peer is concerned, that data is still in flight.
 * The DSS ACK is updated when the skb is moved to the mptcp rx queue.
 */
void mptcp_space(const struct sock *ssk, int *space, int *full_space)
{
        const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        const struct sock *sk = subflow->conn;

        *space = __mptcp_space(sk);
        *full_space = tcp_full_space(sk);
}

void __mptcp_error_report(struct sock *sk)
{
        struct mptcp_subflow_context *subflow;
        struct mptcp_sock *msk = mptcp_sk(sk);

        mptcp_for_each_subflow(msk, subflow) {
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
                int err = sock_error(ssk);

                if (!err)
                        continue;

                /* only propagate errors on fallen-back sockets or
                 * on MPC connect
                 */
                if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
                        continue;

                inet_sk_state_store(sk, inet_sk_state_load(ssk));
                sk->sk_err = -err;

                /* This barrier is coupled with smp_rmb() in mptcp_poll() */
                smp_wmb();
                sk_error_report(sk);
                break;
        }
}

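/* Propagate a subflow error to the msk, deferring the work to the release
 * callback when the msk socket is owned by user context.
 */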
static void subflow_error_report(struct sock *ssk)
{
        struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

        mptcp_data_lock(sk);
        if (!sock_owned_by_user(sk))
                __mptcp_error_report(sk);
        else
                __set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->cb_flags);
        mptcp_data_unlock(sk);
}

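/* data_ready hook: wake the msk parent, avoiding stray wakeups for
 * listeners with an empty accept queue and for subflows without
 * in-sequence data.
 */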
static void subflow_data_ready(struct sock *sk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        u16 state = 1 << inet_sk_state_load(sk);
        struct sock *parent = subflow->conn;
        struct mptcp_sock *msk;

        msk = mptcp_sk(parent);
        if (state & TCPF_LISTEN) {
                /* MPJ subflows are removed from the accept queue before
                 * reaching here, avoid stray wakeups
                 */
                if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
                        return;

                parent->sk_data_ready(parent);
                return;
        }

        WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
                     !subflow->mp_join && !(state & TCPF_CLOSE));

        if (mptcp_subflow_data_available(sk))
                mptcp_data_ready(parent, sk);
        else if (unlikely(sk->sk_err))
                subflow_error_report(sk);
}

static void subflow_write_space(struct sock *ssk)
{
        struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

        mptcp_propagate_sndbuf(sk, ssk);
        mptcp_write_space(sk);
}

static const struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        if (sk->sk_family == AF_INET6)
                return &subflow_v6_specific;
#endif
        return &subflow_specific;
}

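/* Switch a v6 subflow between the native v6 af_ops and the "mapped"
 * variant using the IPv4 xmit path, as needed for v4-mapped-v6
 * addresses; the replaced ops are saved in the subflow context so they
 * can be restored by a later call with the opposite @mapped value.
 */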
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        const struct inet_connection_sock_af_ops *target;

        target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);

        pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
                 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);

        if (likely(icsk->icsk_af_ops == target))
                return;

        subflow->icsk_af_ops = icsk->icsk_af_ops;
        icsk->icsk_af_ops = target;
}
#endif

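/* Fill a sockaddr_storage from an mptcp_addr_info, converting between
 * address families as needed so that the result always matches the
 * subflow socket family; e.g. a v4 address targeting a v6 socket is
 * emitted in v4-mapped form:
 *
 *        info->addr = 192.0.2.1  ->  sin6_addr = ::ffff:192.0.2.1
 */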
void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
                         struct sockaddr_storage *addr,
                         unsigned short family)
{
        memset(addr, 0, sizeof(*addr));
        addr->ss_family = family;
        if (addr->ss_family == AF_INET) {
                struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;

                if (info->family == AF_INET)
                        in_addr->sin_addr = info->addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
                else if (ipv6_addr_v4mapped(&info->addr6))
                        in_addr->sin_addr.s_addr = info->addr6.s6_addr32[3];
#endif
                in_addr->sin_port = info->port;
        }
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        else if (addr->ss_family == AF_INET6) {
                struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;

                if (info->family == AF_INET)
                        ipv6_addr_set_v4mapped(info->addr.s_addr,
                                               &in6_addr->sin6_addr);
                else
                        in6_addr->sin6_addr = info->addr6;
                in6_addr->sin6_port = info->port;
        }
#endif
}

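/* Create, bind and connect a new subflow carrying an MP_JOIN towards
 * @remote, on behalf of the given msk. On success the subflow is
 * linked into the msk conn_list and the socket wrapper is discarded.
 * The direct list manipulation implies that the caller (typically the
 * path-manager) holds the msk socket lock.
 */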
int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
                            const struct mptcp_addr_info *remote)
{
        struct mptcp_sock *msk = mptcp_sk(sk);
        struct mptcp_subflow_context *subflow;
        struct sockaddr_storage addr;
        int remote_id = remote->id;
        int local_id = loc->id;
        int err = -ENOTCONN;
        struct socket *sf;
        struct sock *ssk;
        u32 remote_token;
        int addrlen;
        int ifindex;
        u8 flags;

        if (!mptcp_is_fully_established(sk))
                goto err_out;

        err = mptcp_subflow_create_socket(sk, &sf);
        if (err)
                goto err_out;

        ssk = sf->sk;
        subflow = mptcp_subflow_ctx(ssk);
        do {
                get_random_bytes(&subflow->local_nonce, sizeof(u32));
        } while (!subflow->local_nonce);

        if (local_id)
                subflow_set_local_id(subflow, local_id);

        mptcp_pm_get_flags_and_ifindex_by_id(msk, local_id,
                                             &flags, &ifindex);
        subflow->remote_key = msk->remote_key;
        subflow->local_key = msk->local_key;
        subflow->token = msk->token;
        mptcp_info2sockaddr(loc, &addr, ssk->sk_family);

        addrlen = sizeof(struct sockaddr_in);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        if (addr.ss_family == AF_INET6)
                addrlen = sizeof(struct sockaddr_in6);
#endif
        mptcp_sockopt_sync(msk, ssk);

        ssk->sk_bound_dev_if = ifindex;
        err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
        if (err)
                goto failed;

        mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
        pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
                 remote_token, local_id, remote_id);
        subflow->remote_token = remote_token;
        subflow->remote_id = remote_id;
        subflow->request_join = 1;
        subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
        mptcp_info2sockaddr(remote, &addr, ssk->sk_family);

        sock_hold(ssk);
        list_add_tail(&subflow->node, &msk->conn_list);
        err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
        if (err && err != -EINPROGRESS)
                goto failed_unlink;

        /* discard the subflow socket */
        mptcp_sock_graft(ssk, sk->sk_socket);
        iput(SOCK_INODE(sf));
        WRITE_ONCE(msk->allow_infinite_fallback, false);
        return 0;

failed_unlink:
        list_del(&subflow->node);
        sock_put(mptcp_subflow_tcp_sock(subflow));

failed:
        subflow->disposable = 1;
        sock_release(sf);

err_out:
        /* we account subflows before the creation, and these failures will not
         * be caught by sk_state_change()
         */
        mptcp_pm_close_subflow(msk);
        return err;
}

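/* Charge the child socket to the parent's cgroup and memcg; the child
 * memcg reference acquired at allocation time is dropped and replaced
 * with a reference on the parent's memcg, kept only if css_tryget()
 * succeeds.
 */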
static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
{
#ifdef CONFIG_SOCK_CGROUP_DATA
        struct sock_cgroup_data *parent_skcd = &parent->sk_cgrp_data,
                                *child_skcd = &child->sk_cgrp_data;

        /* only the additional subflows created by kworkers have to be modified */
        if (cgroup_id(sock_cgroup_ptr(parent_skcd)) !=
            cgroup_id(sock_cgroup_ptr(child_skcd))) {
#ifdef CONFIG_MEMCG
                struct mem_cgroup *memcg = parent->sk_memcg;

                mem_cgroup_sk_free(child);
                if (memcg && css_tryget(&memcg->css))
                        child->sk_memcg = memcg;
#endif /* CONFIG_MEMCG */

                cgroup_sk_free(child_skcd);
                *child_skcd = *parent_skcd;
                cgroup_sk_clone(child_skcd);
        }
#endif /* CONFIG_SOCK_CGROUP_DATA */
}

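/* Swap the subflow struct proto with the override variant, whose
 * release_cb also processes the MPTCP delegated actions (see
 * tcp_release_cb_override() below); undone at ULP release time.
 */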
static void mptcp_subflow_ops_override(struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        if (ssk->sk_prot == &tcpv6_prot)
                ssk->sk_prot = &tcpv6_prot_override;
        else
#endif
                ssk->sk_prot = &tcp_prot_override;
}

static void mptcp_subflow_ops_undo_override(struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        if (ssk->sk_prot == &tcpv6_prot_override)
                ssk->sk_prot = &tcpv6_prot;
        else
#endif
                ssk->sk_prot = &tcp_prot;
}
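
/* Create the kernel TCP socket backing a new subflow and attach the
 * "mptcp" ULP to it. Inode ownership and cgroup data are copied from
 * the parent msk socket so that diag/procfs attribute the subflow to
 * the right user, even when the allocation happens from a kernel
 * worker.
 */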
int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
{
        struct mptcp_subflow_context *subflow;
        struct net *net = sock_net(sk);
        struct socket *sf;
        int err;

        /* un-accepted server sockets can reach here - on bad configuration
         * bail early to avoid greater trouble later
         */
        if (unlikely(!sk->sk_socket))
                return -EINVAL;

        err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP,
                               &sf);
        if (err)
                return err;

        lock_sock(sf->sk);

        /* the newly created socket has to be in the same cgroup as its parent */
        mptcp_attach_cgroup(sk, sf->sk);

        /* kernel sockets do not by default acquire net ref, but TCP timer
         * needs it.
         */
        sf->sk->sk_net_refcnt = 1;
        get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL);
        sock_inuse_add(net, 1);
        err = tcp_set_ulp(sf->sk, "mptcp");
        release_sock(sf->sk);

        if (err) {
                sock_release(sf);
                return err;
        }

        /* the newly created socket really belongs to the owning MPTCP master
         * socket, even if for additional subflows the allocation is performed
         * by a kernel workqueue. Adjust inode references, so that the
         * procfs/diag interfaces really show this one belonging to the correct
         * user.
         */
        SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
        SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
        SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;

        subflow = mptcp_subflow_ctx(sf->sk);
        pr_debug("subflow=%p", subflow);

        *new_sock = sf;
        sock_hold(sk);
        subflow->conn = sk;
        mptcp_subflow_ops_override(sf->sk);

        return 0;
}

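/* Allocate the MPTCP subflow context for @sk and link it via
 * icsk_ulp_data; returns NULL on allocation failure, in which case
 * callers either fail the ULP init or fall back to plain TCP.
 */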
static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
                                                        gfp_t priority)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct mptcp_subflow_context *ctx;

        ctx = kzalloc(sizeof(*ctx), priority);
        if (!ctx)
                return NULL;

        rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
        INIT_LIST_HEAD(&ctx->node);
        INIT_LIST_HEAD(&ctx->delegated_node);

        pr_debug("subflow=%p", ctx);

        ctx->tcp_sock = sk;

        return ctx;
}

static void __subflow_state_change(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_all(&wq->wait);
        rcu_read_unlock();
}

static bool subflow_is_done(const struct sock *sk)
{
        return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
}

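/* sk_state_change callback for subflows: handles the simultaneous
 * connect fallback case, feeds the data-availability machinery (a FIN
 * carrying a DSS could otherwise go unnoticed, see the comment below)
 * and propagates EOF to the msk on fallback sockets.
 */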
static void subflow_state_change(struct sock *sk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct sock *parent = subflow->conn;

        __subflow_state_change(sk);

        if (subflow_simultaneous_connect(sk)) {
                mptcp_propagate_sndbuf(parent, sk);
                mptcp_do_fallback(sk);
                mptcp_rcv_space_init(mptcp_sk(parent), sk);
                pr_fallback(mptcp_sk(parent));
                subflow->conn_finished = 1;
                mptcp_set_connected(parent);
        }

        /* as recvmsg() does not acquire the subflow socket for ssk selection,
         * a fin packet carrying a DSS can go unnoticed if we don't trigger
         * the data available machinery here.
         */
        if (mptcp_subflow_data_available(sk))
                mptcp_data_ready(parent, sk);
        else if (unlikely(sk->sk_err))
                subflow_error_report(sk);

        subflow_sched_work_if_closed(mptcp_sk(parent), sk);

        if (__mptcp_check_fallback(mptcp_sk(parent)) &&
            !subflow->rx_eof && subflow_is_done(sk)) {
                subflow->rx_eof = 1;
                mptcp_subflow_eof(parent);
        }
}

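/* Dispose of the unaccepted MPTCP sockets still sitting in the accept
 * queue of a closing listener subflow: collect the relevant msks under
 * the queue spinlock first, then close them one by one with the
 * listener lock temporarily released, to avoid ABBA deadlocks between
 * the listener and the msk socket locks.
 */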
void mptcp_subflow_queue_clean(struct sock *listener_ssk)
{
        struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
        struct mptcp_sock *msk, *next, *head = NULL;
        struct request_sock *req;

        /* build a list of all unaccepted mptcp sockets */
        spin_lock_bh(&queue->rskq_lock);
        for (req = queue->rskq_accept_head; req; req = req->dl_next) {
                struct mptcp_subflow_context *subflow;
                struct sock *ssk = req->sk;
                struct mptcp_sock *msk;

                if (!sk_is_mptcp(ssk))
                        continue;

                subflow = mptcp_subflow_ctx(ssk);
                if (!subflow || !subflow->conn)
                        continue;

                /* skip if already in list */
                msk = mptcp_sk(subflow->conn);
                if (msk->dl_next || msk == head)
                        continue;

                msk->dl_next = head;
                head = msk;
        }
        spin_unlock_bh(&queue->rskq_lock);
        if (!head)
                return;

        /* we can't acquire the msk socket lock under the subflow one,
         * or it will cause an ABBA deadlock
         */
        release_sock(listener_ssk);

        for (msk = head; msk; msk = next) {
                struct sock *sk = (struct sock *)msk;
                bool do_cancel_work;

                sock_hold(sk);
                lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
                next = msk->dl_next;
                msk->first = NULL;
                msk->dl_next = NULL;

                do_cancel_work = __mptcp_close(sk, 0);
                release_sock(sk);
                if (do_cancel_work)
                        mptcp_cancel_work(sk);
                sock_put(sk);
        }

        /* we are still under the listener msk socket lock */
        lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
}

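/* ULP init hook: attach a subflow context to a kernel-created TCP
 * socket and divert its state/data/error callbacks to the subflow_*
 * handlers above, saving the original ones in the context for the
 * fallback path.
 */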
static int subflow_ulp_init(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct mptcp_subflow_context *ctx;
        struct tcp_sock *tp = tcp_sk(sk);
        int err = 0;

        /* disallow attaching ULP to a socket unless it has been
         * created with sock_create_kern()
         */
        if (!sk->sk_kern_sock) {
                err = -EOPNOTSUPP;
                goto out;
        }

        ctx = subflow_create_ctx(sk, GFP_KERNEL);
        if (!ctx) {
                err = -ENOMEM;
                goto out;
        }

        pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);

        tp->is_mptcp = 1;
        ctx->icsk_af_ops = icsk->icsk_af_ops;
        icsk->icsk_af_ops = subflow_default_af_ops(sk);
        ctx->tcp_state_change = sk->sk_state_change;
        ctx->tcp_error_report = sk->sk_error_report;

        WARN_ON_ONCE(sk->sk_data_ready != sock_def_readable);
        WARN_ON_ONCE(sk->sk_write_space != sk_stream_write_space);

        sk->sk_data_ready = subflow_data_ready;
        sk->sk_write_space = subflow_write_space;
        sk->sk_state_change = subflow_state_change;
        sk->sk_error_report = subflow_error_report;
out:
        return err;
}

static void subflow_ulp_release(struct sock *ssk)
{
        struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
        bool release = true;
        struct sock *sk;

        if (!ctx)
                return;

        sk = ctx->conn;
        if (sk) {
                /* if the msk has been orphaned, keep the ctx alive: it will
                 * be freed by __mptcp_close_ssk(), when the subflow is still
                 * unaccepted
                 */
                release = ctx->disposable || list_empty(&ctx->node);
                sock_put(sk);
        }

        mptcp_subflow_ops_undo_override(ssk);
        if (release)
                kfree_rcu(ctx, rcu);
}

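/* ULP clone hook, run when a request socket is promoted to a full TCP
 * socket: build a fresh subflow context for the child out of the
 * request socket data, or fall back to plain TCP when the handshake
 * did not negotiate MPTCP (or the context allocation fails).
 */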
static void subflow_ulp_clone(const struct request_sock *req,
                              struct sock *newsk,
                              const gfp_t priority)
{
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
        struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
        struct mptcp_subflow_context *new_ctx;

        if (!tcp_rsk(req)->is_mptcp ||
            (!subflow_req->mp_capable && !subflow_req->mp_join)) {
                subflow_ulp_fallback(newsk, old_ctx);
                return;
        }

        new_ctx = subflow_create_ctx(newsk, priority);
        if (!new_ctx) {
                subflow_ulp_fallback(newsk, old_ctx);
                return;
        }

        new_ctx->conn_finished = 1;
        new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
        new_ctx->tcp_state_change = old_ctx->tcp_state_change;
        new_ctx->tcp_error_report = old_ctx->tcp_error_report;
        new_ctx->rel_write_seq = 1;
        new_ctx->tcp_sock = newsk;

        if (subflow_req->mp_capable) {
                /* see comments in subflow_syn_recv_sock(), MPTCP connection
                 * is fully established only after we receive the remote key
                 */
                new_ctx->mp_capable = 1;
                new_ctx->local_key = subflow_req->local_key;
                new_ctx->token = subflow_req->token;
                new_ctx->ssn_offset = subflow_req->ssn_offset;
                new_ctx->idsn = subflow_req->idsn;

                /* this is the first subflow, id is always 0 */
                new_ctx->local_id_valid = 1;
        } else if (subflow_req->mp_join) {
                new_ctx->ssn_offset = subflow_req->ssn_offset;
                new_ctx->mp_join = 1;
                new_ctx->fully_established = 1;
                new_ctx->backup = subflow_req->backup;
                new_ctx->remote_id = subflow_req->remote_id;
                new_ctx->token = subflow_req->token;
                new_ctx->thmac = subflow_req->thmac;

                /* the subflow req id is valid, fetched via subflow_check_req()
                 * and subflow_token_join_request()
                 */
                subflow_set_local_id(new_ctx, subflow_req->local_id);
        }
}

static void tcp_release_cb_override(struct sock *ssk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);

        if (mptcp_subflow_has_delegated_action(subflow))
                mptcp_subflow_process_delegated(ssk);

        tcp_release_cb(ssk);
}

static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
        .name           = "mptcp",
        .owner          = THIS_MODULE,
        .init           = subflow_ulp_init,
        .release        = subflow_ulp_release,
        .clone          = subflow_ulp_clone,
};

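/* Set up the request_sock ops used for MPTCP subflow requests: same
 * behavior as plain TCP, but with a larger object size, a dedicated
 * slab and a destructor that also releases the MPTCP token and the
 * msk reference.
 */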
static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{
        subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
        subflow_ops->slab_name = "request_sock_subflow";

        subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
                                              subflow_ops->obj_size, 0,
                                              SLAB_ACCOUNT |
                                              SLAB_TYPESAFE_BY_RCU,
                                              NULL);
        if (!subflow_ops->slab)
                return -ENOMEM;

        subflow_ops->destructor = subflow_req_destructor;

        return 0;
}

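/* Boot-time setup: clone the TCP request sock and af ops, divert the
 * MPTCP-relevant callbacks to the subflow_* variants and register the
 * "mptcp" ULP. Failures here are fatal, as they would leave MPTCP
 * half-initialized.
 */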
void __init mptcp_subflow_init(void)
{
        mptcp_subflow_request_sock_ops = tcp_request_sock_ops;
        if (subflow_ops_init(&mptcp_subflow_request_sock_ops) != 0)
                panic("MPTCP: failed to init subflow request sock ops\n");

        subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
        subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;

        subflow_specific = ipv4_specific;
        subflow_specific.conn_request = subflow_v4_conn_request;
        subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
        subflow_specific.sk_rx_dst_set = subflow_finish_connect;
        subflow_specific.rebuild_header = subflow_rebuild_header;

        tcp_prot_override = tcp_prot;
        tcp_prot_override.release_cb = tcp_release_cb_override;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
        subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;

        subflow_v6_specific = ipv6_specific;
        subflow_v6_specific.conn_request = subflow_v6_conn_request;
        subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
        subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
        subflow_v6_specific.rebuild_header = subflow_v6_rebuild_header;

        subflow_v6m_specific = subflow_v6_specific;
        subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
        subflow_v6m_specific.send_check = ipv4_specific.send_check;
        subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
        subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
        subflow_v6m_specific.net_frag_header_len = 0;
        subflow_v6m_specific.rebuild_header = subflow_rebuild_header;

        tcpv6_prot_override = tcpv6_prot;
        tcpv6_prot_override.release_cb = tcp_release_cb_override;
#endif

        mptcp_diag_subflow_init(&subflow_ulp_ops);

        if (tcp_register_ulp(&subflow_ulp_ops) != 0)
                panic("MPTCP: failed to register subflows to ULP\n");
}