mptcp: don't leak msk in token container
net/mptcp/subflow.c

// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#endif
#include <net/mptcp.h>
#include "protocol.h"
#include "mib.h"

static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
                                  enum linux_mptcp_mib_field field)
{
        MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
}

static int subflow_rebuild_header(struct sock *sk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        int local_id, err = 0;

        if (subflow->request_mptcp && !subflow->token) {
                pr_debug("subflow=%p", sk);
                err = mptcp_token_new_connect(sk);
        } else if (subflow->request_join && !subflow->local_nonce) {
                struct mptcp_sock *msk = (struct mptcp_sock *)subflow->conn;

                pr_debug("subflow=%p", sk);

                do {
                        get_random_bytes(&subflow->local_nonce, sizeof(u32));
                } while (!subflow->local_nonce);

                if (subflow->local_id)
                        goto out;

                local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
                if (local_id < 0)
                        return -EINVAL;

                subflow->local_id = local_id;
        }

out:
        if (err)
                return err;

        return subflow->icsk_af_ops->rebuild_header(sk);
}
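
/* A minimal userspace model (compiled out) of the nonce generation above:
 * the do/while loop guarantees a nonzero value, since a zero local_nonce
 * means "not yet initialized" to the check in subflow_rebuild_header().
 * The rng callback is a hypothetical stand-in for get_random_bytes().
 */
#if 0
#include <stdint.h>

static uint32_t nonzero_nonce(uint32_t (*rng)(void))
{
        uint32_t nonce;

        do {
                nonce = rng();  /* stands in for get_random_bytes() */
        } while (!nonce);

        return nonce;
}
#endif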

static void subflow_req_destructor(struct request_sock *req)
{
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

        pr_debug("subflow_req=%p", subflow_req);

        if (subflow_req->mp_capable)
                mptcp_token_destroy_request(subflow_req->token);
        tcp_request_sock_ops.destructor(req);
}

static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
                                  void *hmac)
{
        u8 msg[8];

        put_unaligned_be32(nonce1, &msg[0]);
        put_unaligned_be32(nonce2, &msg[4]);

        mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}
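
/* Userspace sketch (compiled out) of the message layout fed to the HMAC:
 * the two 32-bit nonces, big-endian, back to back -- exactly what the
 * put_unaligned_be32() calls above produce.
 */
#if 0
#include <stdint.h>

static void join_hmac_msg(uint32_t nonce1, uint32_t nonce2, uint8_t msg[8])
{
        int i;

        for (i = 0; i < 4; i++) {
                msg[i] = nonce1 >> (24 - 8 * i);        /* big-endian nonce1 */
                msg[4 + i] = nonce2 >> (24 - 8 * i);    /* big-endian nonce2 */
        }
}
#endif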

/* validate received token and create truncated hmac and nonce for SYN-ACK */
static bool subflow_token_join_request(struct request_sock *req,
                                       const struct sk_buff *skb)
{
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
        u8 hmac[SHA256_DIGEST_SIZE];
        struct mptcp_sock *msk;
        int local_id;

        msk = mptcp_token_get_sock(subflow_req->token);
        if (!msk) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
                return false;
        }

        local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
        if (local_id < 0) {
                sock_put((struct sock *)msk);
                return false;
        }
        subflow_req->local_id = local_id;

        get_random_bytes(&subflow_req->local_nonce, sizeof(u32));

        subflow_generate_hmac(msk->local_key, msk->remote_key,
                              subflow_req->local_nonce,
                              subflow_req->remote_nonce, hmac);

        subflow_req->thmac = get_unaligned_be64(hmac);

        sock_put((struct sock *)msk);
        return true;
}
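
/* Sketch (compiled out) of how the 64-bit 'thmac' above is derived from
 * the full HMAC-SHA256 digest: the first 8 digest bytes read big-endian,
 * the userspace equivalent of get_unaligned_be64(hmac).
 */
#if 0
#include <stdint.h>

static uint64_t thmac_truncate(const uint8_t digest[32])
{
        uint64_t thmac = 0;
        int i;

        for (i = 0; i < 8; i++)
                thmac = (thmac << 8) | digest[i];

        return thmac;
}
#endif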

static void subflow_init_req(struct request_sock *req,
                             const struct sock *sk_listener,
                             struct sk_buff *skb)
{
        struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
        struct mptcp_options_received mp_opt;

        pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

        mptcp_get_options(skb, &mp_opt);

        subflow_req->mp_capable = 0;
        subflow_req->mp_join = 0;

#ifdef CONFIG_TCP_MD5SIG
        /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
         * TCP option space.
         */
        if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
                return;
#endif

        if (mp_opt.mp_capable) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

                if (mp_opt.mp_join)
                        return;
        } else if (mp_opt.mp_join) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
        }

        if (mp_opt.mp_capable && listener->request_mptcp) {
                int err;

                err = mptcp_token_new_request(req);
                if (err == 0)
                        subflow_req->mp_capable = 1;

                subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
        } else if (mp_opt.mp_join && listener->request_mptcp) {
                subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
                subflow_req->mp_join = 1;
                subflow_req->backup = mp_opt.backup;
                subflow_req->remote_id = mp_opt.join_id;
                subflow_req->token = mp_opt.token;
                subflow_req->remote_nonce = mp_opt.nonce;
                pr_debug("token=%u, remote_nonce=%u", subflow_req->token,
                         subflow_req->remote_nonce);
                if (!subflow_token_join_request(req, skb)) {
                        subflow_req->mp_join = 0;
                        /* @@ need to trigger RST */
                }
        }
}

static void subflow_v4_init_req(struct request_sock *req,
                                const struct sock *sk_listener,
                                struct sk_buff *skb)
{
        tcp_rsk(req)->is_mptcp = 1;

        tcp_request_sock_ipv4_ops.init_req(req, sk_listener, skb);

        subflow_init_req(req, sk_listener, skb);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static void subflow_v6_init_req(struct request_sock *req,
                                const struct sock *sk_listener,
                                struct sk_buff *skb)
{
        tcp_rsk(req)->is_mptcp = 1;

        tcp_request_sock_ipv6_ops.init_req(req, sk_listener, skb);

        subflow_init_req(req, sk_listener, skb);
}
#endif

/* validate received truncated hmac and create hmac for third ACK */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{
        u8 hmac[SHA256_DIGEST_SIZE];
        u64 thmac;

        subflow_generate_hmac(subflow->remote_key, subflow->local_key,
                              subflow->remote_nonce, subflow->local_nonce,
                              hmac);

        thmac = get_unaligned_be64(hmac);
        pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
                 subflow, subflow->token,
                 (unsigned long long)thmac,
                 (unsigned long long)subflow->thmac);

        return thmac == subflow->thmac;
}

static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct mptcp_options_received mp_opt;
        struct sock *parent = subflow->conn;
        struct tcp_sock *tp = tcp_sk(sk);

        subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

        if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
                inet_sk_state_store(parent, TCP_ESTABLISHED);
                parent->sk_state_change(parent);
        }

        /* be sure no special action on any packet other than syn-ack */
        if (subflow->conn_finished)
                return;

        subflow->conn_finished = 1;

        mptcp_get_options(skb, &mp_opt);
        if (subflow->request_mptcp && mp_opt.mp_capable) {
                subflow->mp_capable = 1;
                subflow->can_ack = 1;
                subflow->remote_key = mp_opt.sndr_key;
                pr_debug("subflow=%p, remote_key=%llu", subflow,
                         subflow->remote_key);
        } else if (subflow->request_join && mp_opt.mp_join) {
                subflow->mp_join = 1;
                subflow->thmac = mp_opt.thmac;
                subflow->remote_nonce = mp_opt.nonce;
                pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow,
                         subflow->thmac, subflow->remote_nonce);
        } else if (subflow->request_mptcp) {
                tp->is_mptcp = 0;
        }

        if (!tp->is_mptcp)
                return;

        if (subflow->mp_capable) {
                pr_debug("subflow=%p, remote_key=%llu", mptcp_subflow_ctx(sk),
                         subflow->remote_key);
                mptcp_finish_connect(sk);

                if (skb) {
                        pr_debug("synack seq=%u", TCP_SKB_CB(skb)->seq);
                        subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
                }
        } else if (subflow->mp_join) {
                u8 hmac[SHA256_DIGEST_SIZE];

                pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u",
                         subflow, subflow->thmac,
                         subflow->remote_nonce);
                if (!subflow_thmac_valid(subflow)) {
                        MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
                        subflow->mp_join = 0;
                        goto do_reset;
                }

                subflow_generate_hmac(subflow->local_key, subflow->remote_key,
                                      subflow->local_nonce,
                                      subflow->remote_nonce,
                                      hmac);

                memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);

                if (skb)
                        subflow->ssn_offset = TCP_SKB_CB(skb)->seq;

                if (!mptcp_finish_join(sk))
                        goto do_reset;

                MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
        } else {
do_reset:
                tcp_send_active_reset(sk, GFP_ATOMIC);
                tcp_done(sk);
        }
}

static struct request_sock_ops subflow_request_sock_ops;
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

        pr_debug("subflow=%p", subflow);

        /* Never answer SYNs sent to broadcast or multicast addresses */
        if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
                goto drop;

        return tcp_conn_request(&subflow_request_sock_ops,
                                &subflow_request_sock_ipv4_ops,
                                sk, skb);
drop:
        tcp_listendrop(sk);
        return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops;
static struct inet_connection_sock_af_ops subflow_v6_specific;
static struct inet_connection_sock_af_ops subflow_v6m_specific;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

        pr_debug("subflow=%p", subflow);

        if (skb->protocol == htons(ETH_P_IP))
                return subflow_v4_conn_request(sk, skb);

        if (!ipv6_unicast_destination(skb))
                goto drop;

        return tcp_conn_request(&subflow_request_sock_ops,
                                &subflow_request_sock_ipv6_ops, sk, skb);

drop:
        tcp_listendrop(sk);
        return 0; /* don't send reset */
}
#endif

/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
                               const struct mptcp_options_received *mp_opt)
{
        const struct mptcp_subflow_request_sock *subflow_req;
        u8 hmac[SHA256_DIGEST_SIZE];
        struct mptcp_sock *msk;
        bool ret;

        subflow_req = mptcp_subflow_rsk(req);
        msk = mptcp_token_get_sock(subflow_req->token);
        if (!msk)
                return false;

        subflow_generate_hmac(msk->remote_key, msk->local_key,
                              subflow_req->remote_nonce,
                              subflow_req->local_nonce, hmac);

        ret = true;
        if (crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN))
                ret = false;

        sock_put((struct sock *)msk);
        return ret;
}
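
/* The crypto_memneq() call above compares HMACs in constant time. A
 * userspace model (compiled out): the run time does not depend on where
 * the buffers first differ, so a peer cannot binary-search a valid HMAC
 * byte by byte via response timing.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static int memneq_model(const uint8_t *a, const uint8_t *b, size_t n)
{
        uint8_t diff = 0;
        size_t i;

        for (i = 0; i < n; i++)
                diff |= a[i] ^ b[i];    /* accumulate, never exit early */

        return diff != 0;
}
#endif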

static void mptcp_sock_destruct(struct sock *sk)
{
        /* if the new mptcp socket isn't accepted, it is freed from the
         * tcp listener socket's request queue, linked from req->sk. The
         * tcp socket is released. This calls the ULP release function
         * which will also remove the mptcp socket, via
         * sock_put(ctx->conn).
         *
         * Problem is that the mptcp socket will not be in SYN_RECV state
         * and doesn't have the SOCK_DEAD flag set. Both result in
         * warnings from inet_sock_destruct.
         */

        if (sk->sk_state == TCP_SYN_RECV) {
                sk->sk_state = TCP_CLOSE;
                WARN_ON_ONCE(sk->sk_socket);
                sock_orphan(sk);
        }

        mptcp_token_destroy(mptcp_sk(sk)->token);
        inet_sock_destruct(sk);
}

static void mptcp_force_close(struct sock *sk)
{
        inet_sk_state_store(sk, TCP_CLOSE);
        sk_common_release(sk);
}

static void subflow_ulp_fallback(struct sock *sk,
                                 struct mptcp_subflow_context *old_ctx)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        mptcp_subflow_tcp_fallback(sk, old_ctx);
        icsk->icsk_ulp_ops = NULL;
        rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
        tcp_sk(sk)->is_mptcp = 0;
}

static void subflow_drop_ctx(struct sock *ssk)
{
        struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

        if (!ctx)
                return;

        subflow_ulp_fallback(ssk, ctx);
        if (ctx->conn)
                sock_put(ctx->conn);

        kfree_rcu(ctx, rcu);
}

static struct sock *subflow_syn_recv_sock(const struct sock *sk,
                                          struct sk_buff *skb,
                                          struct request_sock *req,
                                          struct dst_entry *dst,
                                          struct request_sock *req_unhash,
                                          bool *own_req)
{
        struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
        struct mptcp_subflow_request_sock *subflow_req;
        struct mptcp_options_received mp_opt;
        bool fallback_is_fatal = false;
        struct sock *new_msk = NULL;
        bool fallback = false;
        struct sock *child;

        pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

        /* we need a valid 'mp_capable' value later, even when the options
         * are not parsed
         */
        mp_opt.mp_capable = 0;
        if (tcp_rsk(req)->is_mptcp == 0)
                goto create_child;

        /* if the sk is MP_CAPABLE, we try to fetch the client key */
        subflow_req = mptcp_subflow_rsk(req);
        if (subflow_req->mp_capable) {
                if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
                        /* here we can receive and accept an in-window,
                         * out-of-order pkt, which will not carry the MP_CAPABLE
                         * opt even on mptcp enabled paths
                         */
                        goto create_msk;
                }

                mptcp_get_options(skb, &mp_opt);
                if (!mp_opt.mp_capable) {
                        fallback = true;
                        goto create_child;
                }

create_msk:
                new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
                if (!new_msk)
                        fallback = true;
        } else if (subflow_req->mp_join) {
                fallback_is_fatal = true;
                mptcp_get_options(skb, &mp_opt);
                if (!mp_opt.mp_join ||
                    !subflow_hmac_valid(req, &mp_opt)) {
                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
                        return NULL;
                }
        }

create_child:
        child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
                                                     req_unhash, own_req);

        if (child && *own_req) {
                struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

                tcp_rsk(req)->drop_req = false;

                /* we need to fall back on ctx allocation failure and on the
                 * prerequisite checks above. In the latter scenario we
                 * additionally need to reset the context to non-MPTCP status.
                 */
                if (!ctx || fallback) {
                        if (fallback_is_fatal)
                                goto dispose_child;

                        subflow_drop_ctx(child);
                        goto out;
                }

                if (ctx->mp_capable) {
                        /* new mpc subflow takes ownership of the newly
                         * created mptcp socket
                         */
                        new_msk->sk_destruct = mptcp_sock_destruct;
                        mptcp_pm_new_connection(mptcp_sk(new_msk), 1);
                        ctx->conn = new_msk;
                        new_msk = NULL;

                        /* with OoO packets we can reach here without ingress
                         * mpc option
                         */
                        ctx->remote_key = mp_opt.sndr_key;
                        ctx->fully_established = mp_opt.mp_capable;
                        ctx->can_ack = mp_opt.mp_capable;
                } else if (ctx->mp_join) {
                        struct mptcp_sock *owner;

                        owner = mptcp_token_get_sock(ctx->token);
                        if (!owner)
                                goto dispose_child;

                        ctx->conn = (struct sock *)owner;
                        if (!mptcp_finish_join(child))
                                goto dispose_child;

                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
                        tcp_rsk(req)->drop_req = true;
                }
        }

out:
        /* dispose of the leftover mptcp master, if any */
        if (unlikely(new_msk))
                mptcp_force_close(new_msk);

        /* check for expected invariant - should never trigger, just helps
         * catching earlier subtle bugs
         */
        WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
                     (!mptcp_subflow_ctx(child) ||
                      !mptcp_subflow_ctx(child)->conn));
        return child;

dispose_child:
        subflow_drop_ctx(child);
        tcp_rsk(req)->drop_req = true;
        tcp_send_active_reset(child, GFP_ATOMIC);
        inet_csk_prepare_for_destroy_sock(child);
        tcp_done(child);

        /* The last child reference will be released by the caller */
        return child;
}

static struct inet_connection_sock_af_ops subflow_specific;

enum mapping_status {
        MAPPING_OK,
        MAPPING_INVALID,
        MAPPING_EMPTY,
        MAPPING_DATA_FIN
};

static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
{
        if ((u32)seq == (u32)old_seq)
                return old_seq;

        /* Assume map covers data not mapped yet. */
        return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
}
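
/* Userspace model (compiled out) of expand_seq(): recover the upper 32 DSN
 * bits from the previous 64-bit mapping, assuming the new map covers data
 * not mapped yet. E.g. old_seq=0xffffffff, old_data_len=4, seq=0x2 expands
 * to 0x100000002: the carry into the upper word is inferred.
 */
#if 0
#include <stdint.h>

static uint64_t expand_seq_model(uint64_t old_seq, uint16_t old_data_len,
                                 uint64_t seq)
{
        if ((uint32_t)seq == (uint32_t)old_seq)
                return old_seq;

        return seq | ((old_seq + old_data_len + 1) & 0xffffffff00000000ULL);
}
#endif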

static void warn_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
        WARN_ONCE(1, "Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
                  ssn, subflow->map_subflow_seq, subflow->map_data_len);
}

static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        unsigned int skb_consumed;

        skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
        if (WARN_ON_ONCE(skb_consumed >= skb->len))
                return true;

        return skb->len - skb_consumed <= subflow->map_data_len -
                                          mptcp_subflow_get_map_offset(subflow);
}

static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

        if (unlikely(before(ssn, subflow->map_subflow_seq))) {
                /* Mapping covers data later in the subflow stream,
                 * currently unsupported.
                 */
                warn_bad_map(subflow, ssn);
                return false;
        }
        if (unlikely(!before(ssn, subflow->map_subflow_seq +
                                  subflow->map_data_len))) {
                /* Mapping only covers past subflow data, invalid */
                warn_bad_map(subflow, ssn + skb->len);
                return false;
        }
        return true;
}
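
/* Model (compiled out) of the bounds check above: the current subflow
 * sequence number must fall inside [map_subflow_seq, map_subflow_seq +
 * map_data_len), using the same wrap-safe comparison as the kernel's
 * before() helper.
 */
#if 0
#include <stdint.h>

static int seq32_before(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) < 0;    /* safe across sequence wrap */
}

static int ssn_in_map(uint32_t ssn, uint32_t map_seq, uint16_t map_len)
{
        return !seq32_before(ssn, map_seq) &&
               seq32_before(ssn, map_seq + map_len);
}
#endif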

static enum mapping_status get_mapping_status(struct sock *ssk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        struct mptcp_ext *mpext;
        struct sk_buff *skb;
        u16 data_len;
        u64 map_seq;

        skb = skb_peek(&ssk->sk_receive_queue);
        if (!skb)
                return MAPPING_EMPTY;

        mpext = mptcp_get_ext(skb);
        if (!mpext || !mpext->use_map) {
                if (!subflow->map_valid && !skb->len) {
                        /* the TCP stack delivers 0-len FIN packets to the
                         * receive queue; those are the only 0-len packets
                         * ever expected here, and a missing mapping is
                         * acceptable only for them
                         */
                        if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
                                WARN_ONCE(1, "0len seq %d:%d flags %x",
                                          TCP_SKB_CB(skb)->seq,
                                          TCP_SKB_CB(skb)->end_seq,
                                          TCP_SKB_CB(skb)->tcp_flags);
                        sk_eat_skb(ssk, skb);
                        return MAPPING_EMPTY;
                }

                if (!subflow->map_valid)
                        return MAPPING_INVALID;

                goto validate_seq;
        }

        pr_debug("seq=%llu is64=%d ssn=%u data_len=%u data_fin=%d",
                 mpext->data_seq, mpext->dsn64, mpext->subflow_seq,
                 mpext->data_len, mpext->data_fin);

        data_len = mpext->data_len;
        if (data_len == 0) {
                pr_err("Infinite mapping not handled");
                MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
                return MAPPING_INVALID;
        }

        if (mpext->data_fin == 1) {
                if (data_len == 1) {
                        pr_debug("DATA_FIN with no payload");
                        if (subflow->map_valid) {
                                /* A DATA_FIN might arrive in a DSS
                                 * option before the previous mapping
                                 * has been fully consumed. Continue
                                 * handling the existing mapping.
                                 */
                                skb_ext_del(skb, SKB_EXT_MPTCP);
                                return MAPPING_OK;
                        } else {
                                return MAPPING_DATA_FIN;
                        }
                }

                /* Adjust for DATA_FIN using 1 byte of sequence space */
                data_len--;
        }

        if (!mpext->dsn64) {
                map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
                                     mpext->data_seq);
                subflow->use_64bit_ack = 0;
                pr_debug("expanded seq=%llu", map_seq);
        } else {
                map_seq = mpext->data_seq;
                subflow->use_64bit_ack = 1;
        }

        if (subflow->map_valid) {
                /* Allow replacing only with an identical map */
                if (subflow->map_seq == map_seq &&
                    subflow->map_subflow_seq == mpext->subflow_seq &&
                    subflow->map_data_len == data_len) {
                        skb_ext_del(skb, SKB_EXT_MPTCP);
                        return MAPPING_OK;
                }

                /* If this skb's data is fully covered by the current mapping,
                 * the new map would need caching, which is not supported
                 */
                if (skb_is_fully_mapped(ssk, skb)) {
                        MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
                        return MAPPING_INVALID;
                }

                /* will validate the next map after consuming the current one */
                return MAPPING_OK;
        }

        subflow->map_seq = map_seq;
        subflow->map_subflow_seq = mpext->subflow_seq;
        subflow->map_data_len = data_len;
        subflow->map_valid = 1;
        subflow->mpc_map = mpext->mpc_map;
        pr_debug("new map seq=%llu subflow_seq=%u data_len=%u",
                 subflow->map_seq, subflow->map_subflow_seq,
                 subflow->map_data_len);

validate_seq:
        /* we revalidate the existing mapping on each new skb, because we must
         * ensure the current skb is completely covered by the available
         * mapping
         */
        if (!validate_mapping(ssk, skb))
                return MAPPING_INVALID;

        skb_ext_del(skb, SKB_EXT_MPTCP);
        return MAPPING_OK;
}
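
/* Sketch (compiled out) of the DATA_FIN accounting in get_mapping_status():
 * DATA_FIN consumes one byte of data sequence space, so a mapping carrying
 * it describes data_len - 1 bytes of actual payload, and data_len == 1 with
 * DATA_FIN means no payload at all.
 */
#if 0
#include <stdint.h>

static uint16_t mapped_payload_len(uint16_t data_len, int data_fin)
{
        return data_fin ? data_len - 1 : data_len;
}
#endif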

static int subflow_read_actor(read_descriptor_t *desc,
                              struct sk_buff *skb,
                              unsigned int offset, size_t len)
{
        size_t copy_len = min(desc->count, len);

        desc->count -= copy_len;

        pr_debug("flushed %zu bytes, %zu left", copy_len, desc->count);
        return copy_len;
}

static bool subflow_check_data_avail(struct sock *ssk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        enum mapping_status status;
        struct mptcp_sock *msk;
        struct sk_buff *skb;

        pr_debug("msk=%p ssk=%p data_avail=%d skb=%p", subflow->conn, ssk,
                 subflow->data_avail, skb_peek(&ssk->sk_receive_queue));
        if (subflow->data_avail)
                return true;

        msk = mptcp_sk(subflow->conn);
        for (;;) {
                u32 map_remaining;
                size_t delta;
                u64 ack_seq;
                u64 old_ack;

                status = get_mapping_status(ssk);
                pr_debug("msk=%p ssk=%p status=%d", msk, ssk, status);
                if (status == MAPPING_INVALID) {
                        ssk->sk_err = EBADMSG;
                        goto fatal;
                }

                if (status != MAPPING_OK)
                        return false;

                skb = skb_peek(&ssk->sk_receive_queue);
                if (WARN_ON_ONCE(!skb))
                        return false;

                /* if msk lacks the remote key, this subflow must provide an
                 * MP_CAPABLE-based mapping
                 */
                if (unlikely(!READ_ONCE(msk->can_ack))) {
                        if (!subflow->mpc_map) {
                                ssk->sk_err = EBADMSG;
                                goto fatal;
                        }
                        WRITE_ONCE(msk->remote_key, subflow->remote_key);
                        WRITE_ONCE(msk->ack_seq, subflow->map_seq);
                        WRITE_ONCE(msk->can_ack, true);
                }

                old_ack = READ_ONCE(msk->ack_seq);
                ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
                pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
                         ack_seq);
                if (ack_seq == old_ack)
                        break;

                /* only accept in-sequence mappings. Old values are spurious
                 * retransmissions; we can hit "future" values on an active
                 * backup subflow switch, in which case we rely on
                 * retransmissions to get in-sequence data. Concurrent
                 * subflow support will require subflow data reordering
                 */
                map_remaining = subflow->map_data_len -
                                mptcp_subflow_get_map_offset(subflow);
                if (before64(ack_seq, old_ack))
                        delta = min_t(size_t, old_ack - ack_seq, map_remaining);
                else
                        delta = min_t(size_t, ack_seq - old_ack, map_remaining);

                /* discard mapped data */
                pr_debug("discarding %zu bytes, current map len=%d", delta,
                         map_remaining);
                if (delta) {
                        read_descriptor_t desc = {
                                .count = delta,
                        };
                        int ret;

                        ret = tcp_read_sock(ssk, &desc, subflow_read_actor);
                        if (ret < 0) {
                                ssk->sk_err = -ret;
                                goto fatal;
                        }
                        if (ret < delta)
                                return false;
                        if (delta == map_remaining)
                                subflow->map_valid = 0;
                }
        }
        return true;

fatal:
        /* fatal protocol error, close the socket */
        /* This barrier is coupled with smp_rmb() in tcp_poll() */
        smp_wmb();
        ssk->sk_error_report(ssk);
        tcp_set_state(ssk, TCP_CLOSE);
        tcp_send_active_reset(ssk, GFP_ATOMIC);
        return false;
}
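
/* Userspace model (compiled out) of the discard computation above: given
 * the msk-level ack and the DSN at the head of the subflow receive queue,
 * the number of bytes to drop is their distance, capped by what is left of
 * the current mapping. before64() is modeled with wrap-safe signed 64-bit
 * arithmetic, as in the kernel.
 */
#if 0
#include <stdint.h>

static int seq64_before(uint64_t a, uint64_t b)
{
        return (int64_t)(a - b) < 0;
}

static uint64_t discard_len(uint64_t old_ack, uint64_t ack_seq,
                            uint64_t map_remaining)
{
        uint64_t delta;

        if (seq64_before(ack_seq, old_ack))
                delta = old_ack - ack_seq;      /* spurious retransmission */
        else
                delta = ack_seq - old_ack;      /* "future" data, not cached */

        return delta < map_remaining ? delta : map_remaining;
}
#endif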

bool mptcp_subflow_data_available(struct sock *sk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct sk_buff *skb;

        /* check if current mapping is still valid */
        if (subflow->map_valid &&
            mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
                subflow->map_valid = 0;
                subflow->data_avail = 0;

                pr_debug("Done with mapping: seq=%u data_len=%u",
                         subflow->map_subflow_seq,
                         subflow->map_data_len);
        }

        if (!subflow_check_data_avail(sk)) {
                subflow->data_avail = 0;
                return false;
        }

        skb = skb_peek(&sk->sk_receive_queue);
        subflow->data_avail = skb &&
                       before(tcp_sk(sk)->copied_seq, TCP_SKB_CB(skb)->end_seq);
        return subflow->data_avail;
}

/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
 * not the ssk one.
 *
 * In mptcp, rwin is about the mptcp-level connection data.
 *
 * Data that is still on the ssk rx queue can thus be ignored: as far
 * as the mptcp peer is concerned, that data is still in flight.
 * The DSS ACK is updated when the skb is moved to the mptcp rx queue.
 */
void mptcp_space(const struct sock *ssk, int *space, int *full_space)
{
        const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        const struct sock *sk = subflow->conn;

        *space = tcp_space(sk);
        *full_space = tcp_full_space(sk);
}

static void subflow_data_ready(struct sock *sk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct sock *parent = subflow->conn;

        if (!subflow->mp_capable && !subflow->mp_join) {
                subflow->tcp_data_ready(sk);

                parent->sk_data_ready(parent);
                return;
        }

        if (mptcp_subflow_data_available(sk))
                mptcp_data_ready(parent, sk);
}

static void subflow_write_space(struct sock *sk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct sock *parent = subflow->conn;

        sk_stream_write_space(sk);
        if (sk_stream_is_writeable(sk)) {
                set_bit(MPTCP_SEND_SPACE, &mptcp_sk(parent)->flags);
                smp_mb__after_atomic();
                /* set SEND_SPACE before sk_stream_write_space clears NOSPACE */
                sk_stream_write_space(parent);
        }
}

static struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        if (sk->sk_family == AF_INET6)
                return &subflow_v6_specific;
#endif
        return &subflow_specific;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_connection_sock_af_ops *target;

        target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);

        pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
                 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);

        if (likely(icsk->icsk_af_ops == target))
                return;

        subflow->icsk_af_ops = icsk->icsk_af_ops;
        icsk->icsk_af_ops = target;
}
#endif

static void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
                                struct sockaddr_storage *addr)
{
        memset(addr, 0, sizeof(*addr));
        addr->ss_family = info->family;
        if (addr->ss_family == AF_INET) {
                struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;

                in_addr->sin_addr = info->addr;
                in_addr->sin_port = info->port;
        }
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        else if (addr->ss_family == AF_INET6) {
                struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;

                in6_addr->sin6_addr = info->addr6;
                in6_addr->sin6_port = info->port;
        }
#endif
}

int __mptcp_subflow_connect(struct sock *sk, int ifindex,
                            const struct mptcp_addr_info *loc,
                            const struct mptcp_addr_info *remote)
{
        struct mptcp_sock *msk = mptcp_sk(sk);
        struct mptcp_subflow_context *subflow;
        struct sockaddr_storage addr;
        struct socket *sf;
        u32 remote_token;
        int addrlen;
        int err;

        if (sk->sk_state != TCP_ESTABLISHED)
                return -ENOTCONN;

        err = mptcp_subflow_create_socket(sk, &sf);
        if (err)
                return err;

        subflow = mptcp_subflow_ctx(sf->sk);
        subflow->remote_key = msk->remote_key;
        subflow->local_key = msk->local_key;
        subflow->token = msk->token;
        mptcp_info2sockaddr(loc, &addr);

        addrlen = sizeof(struct sockaddr_in);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        if (loc->family == AF_INET6)
                addrlen = sizeof(struct sockaddr_in6);
#endif
        sf->sk->sk_bound_dev_if = ifindex;
        err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
        if (err)
                goto failed;

        mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
        pr_debug("msk=%p remote_token=%u", msk, remote_token);
        subflow->remote_token = remote_token;
        subflow->local_id = loc->id;
        subflow->request_join = 1;
        subflow->request_bkup = 1;
        mptcp_info2sockaddr(remote, &addr);

        err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
        if (err && err != -EINPROGRESS)
                goto failed;

        spin_lock_bh(&msk->join_list_lock);
        list_add_tail(&subflow->node, &msk->join_list);
        spin_unlock_bh(&msk->join_list_lock);

        return err;

failed:
        sock_release(sf);
        return err;
}
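
/* Userspace analogue (compiled out) of the kernel_connect() call above:
 * with a nonblocking socket, connect() failing with EINPROGRESS is not an
 * error, the handshake just completes asynchronously. That is why
 * __mptcp_subflow_connect() still links the subflow into join_list in
 * that case.
 */
#if 0
#include <errno.h>
#include <sys/socket.h>

static int connect_nonblock(int fd, const struct sockaddr *sa, socklen_t len)
{
        if (connect(fd, sa, len) == 0)
                return 0;

        return errno == EINPROGRESS ? 0 : -errno;
}
#endif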

int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
{
        struct mptcp_subflow_context *subflow;
        struct net *net = sock_net(sk);
        struct socket *sf;
        int err;

        err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP,
                               &sf);
        if (err)
                return err;

        lock_sock(sf->sk);

        /* kernel sockets do not by default acquire a net ref, but the TCP
         * timer needs it.
         */
        sf->sk->sk_net_refcnt = 1;
        get_net(net);
#ifdef CONFIG_PROC_FS
        this_cpu_add(*net->core.sock_inuse, 1);
#endif
        err = tcp_set_ulp(sf->sk, "mptcp");
        release_sock(sf->sk);

        if (err)
                return err;

        /* the newly created socket really belongs to the owning MPTCP master
         * socket, even when, for additional subflows, the allocation is
         * performed by a kernel workqueue. Adjust inode references, so that
         * the procfs/diag interfaces really show this one belonging to the
         * correct user.
         */
        SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
        SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
        SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;

        subflow = mptcp_subflow_ctx(sf->sk);
        pr_debug("subflow=%p", subflow);

        *new_sock = sf;
        sock_hold(sk);
        subflow->conn = sk;

        return 0;
}

static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
                                                        gfp_t priority)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct mptcp_subflow_context *ctx;

        ctx = kzalloc(sizeof(*ctx), priority);
        if (!ctx)
                return NULL;

        rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
        INIT_LIST_HEAD(&ctx->node);

        pr_debug("subflow=%p", ctx);

        ctx->tcp_sock = sk;

        return ctx;
}

static void __subflow_state_change(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_all(&wq->wait);
        rcu_read_unlock();
}

static bool subflow_is_done(const struct sock *sk)
{
        return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
}

static void subflow_state_change(struct sock *sk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct sock *parent = subflow->conn;

        __subflow_state_change(sk);

        /* as recvmsg() does not acquire the subflow socket for ssk selection,
         * a FIN packet carrying a DSS mapping can go unnoticed if we don't
         * trigger the data-available machinery here.
         */
        if (subflow->mp_capable && mptcp_subflow_data_available(sk))
                mptcp_data_ready(parent, sk);

        if (!(parent->sk_shutdown & RCV_SHUTDOWN) &&
            !subflow->rx_eof && subflow_is_done(sk)) {
                subflow->rx_eof = 1;
                mptcp_subflow_eof(parent);
        }
}

static int subflow_ulp_init(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct mptcp_subflow_context *ctx;
        struct tcp_sock *tp = tcp_sk(sk);
        int err = 0;

        /* disallow attaching ULP to a socket unless it has been
         * created with sock_create_kern()
         */
        if (!sk->sk_kern_sock) {
                err = -EOPNOTSUPP;
                goto out;
        }

        ctx = subflow_create_ctx(sk, GFP_KERNEL);
        if (!ctx) {
                err = -ENOMEM;
                goto out;
        }

        pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);

        tp->is_mptcp = 1;
        ctx->icsk_af_ops = icsk->icsk_af_ops;
        icsk->icsk_af_ops = subflow_default_af_ops(sk);
        ctx->tcp_data_ready = sk->sk_data_ready;
        ctx->tcp_state_change = sk->sk_state_change;
        ctx->tcp_write_space = sk->sk_write_space;
        sk->sk_data_ready = subflow_data_ready;
        sk->sk_write_space = subflow_write_space;
        sk->sk_state_change = subflow_state_change;
out:
        return err;
}

static void subflow_ulp_release(struct sock *sk)
{
        struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(sk);

        if (!ctx)
                return;

        if (ctx->conn)
                sock_put(ctx->conn);

        kfree_rcu(ctx, rcu);
}

static void subflow_ulp_clone(const struct request_sock *req,
                              struct sock *newsk,
                              const gfp_t priority)
{
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
        struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
        struct mptcp_subflow_context *new_ctx;

        if (!tcp_rsk(req)->is_mptcp ||
            (!subflow_req->mp_capable && !subflow_req->mp_join)) {
                subflow_ulp_fallback(newsk, old_ctx);
                return;
        }

        new_ctx = subflow_create_ctx(newsk, priority);
        if (!new_ctx) {
                subflow_ulp_fallback(newsk, old_ctx);
                return;
        }

        new_ctx->conn_finished = 1;
        new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
        new_ctx->tcp_data_ready = old_ctx->tcp_data_ready;
        new_ctx->tcp_state_change = old_ctx->tcp_state_change;
        new_ctx->tcp_write_space = old_ctx->tcp_write_space;
        new_ctx->rel_write_seq = 1;
        new_ctx->tcp_sock = newsk;

        if (subflow_req->mp_capable) {
                /* see comments in subflow_syn_recv_sock(), MPTCP connection
                 * is fully established only after we receive the remote key
                 */
                new_ctx->mp_capable = 1;
                new_ctx->local_key = subflow_req->local_key;
                new_ctx->token = subflow_req->token;
                new_ctx->ssn_offset = subflow_req->ssn_offset;
                new_ctx->idsn = subflow_req->idsn;
        } else if (subflow_req->mp_join) {
                new_ctx->ssn_offset = subflow_req->ssn_offset;
                new_ctx->mp_join = 1;
                new_ctx->fully_established = 1;
                new_ctx->backup = subflow_req->backup;
                new_ctx->local_id = subflow_req->local_id;
                new_ctx->token = subflow_req->token;
                new_ctx->thmac = subflow_req->thmac;
        }
}

static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
        .name           = "mptcp",
        .owner          = THIS_MODULE,
        .init           = subflow_ulp_init,
        .release        = subflow_ulp_release,
        .clone          = subflow_ulp_clone,
};

static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{
        subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
        subflow_ops->slab_name = "request_sock_subflow";

        subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
                                              subflow_ops->obj_size, 0,
                                              SLAB_ACCOUNT |
                                              SLAB_TYPESAFE_BY_RCU,
                                              NULL);
        if (!subflow_ops->slab)
                return -ENOMEM;

        subflow_ops->destructor = subflow_req_destructor;

        return 0;
}

void mptcp_subflow_init(void)
{
        subflow_request_sock_ops = tcp_request_sock_ops;
        if (subflow_ops_init(&subflow_request_sock_ops) != 0)
                panic("MPTCP: failed to init subflow request sock ops\n");

        subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
        subflow_request_sock_ipv4_ops.init_req = subflow_v4_init_req;

        subflow_specific = ipv4_specific;
        subflow_specific.conn_request = subflow_v4_conn_request;
        subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
        subflow_specific.sk_rx_dst_set = subflow_finish_connect;
        subflow_specific.rebuild_header = subflow_rebuild_header;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
        subflow_request_sock_ipv6_ops.init_req = subflow_v6_init_req;

        subflow_v6_specific = ipv6_specific;
        subflow_v6_specific.conn_request = subflow_v6_conn_request;
        subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
        subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
        subflow_v6_specific.rebuild_header = subflow_rebuild_header;

        subflow_v6m_specific = subflow_v6_specific;
        subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
        subflow_v6m_specific.send_check = ipv4_specific.send_check;
        subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
        subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
        subflow_v6m_specific.net_frag_header_len = 0;
#endif

        mptcp_diag_subflow_init(&subflow_ulp_ops);

        if (tcp_register_ulp(&subflow_ulp_ops) != 0)
                panic("MPTCP: failed to register subflows to ULP\n");
}
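
/* mptcp_subflow_init() above relies on a common kernel pattern: copy a base
 * ops table by value, then override only the hooks MPTCP needs. A compiled
 * out sketch of that pattern, with hypothetical demo types:
 */
#if 0
struct demo_ops {
        int (*conn_request)(void);
        int (*rebuild_header)(void);
};

static int demo_tcp_conn_request(void) { return 0; }
static int demo_tcp_rebuild_header(void) { return 0; }
static int demo_subflow_conn_request(void) { return 1; }

static const struct demo_ops demo_tcp_ops = {
        .conn_request   = demo_tcp_conn_request,
        .rebuild_header = demo_tcp_rebuild_header,
};

static struct demo_ops demo_subflow_ops;

static void demo_subflow_init(void)
{
        demo_subflow_ops = demo_tcp_ops;        /* inherit every hook */
        demo_subflow_ops.conn_request = demo_subflow_conn_request; /* override */
}
#endif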