1 // SPDX-License-Identifier: GPL-2.0-or-later
4 * Linux INET6 implementation
7 * Pedro Roque <roque@di.fc.ul.pt>
10 * linux/net/ipv4/tcp.c
11 * linux/net/ipv4/tcp_input.c
12 * linux/net/ipv4/tcp_output.c
15 * Hideaki YOSHIFUJI : sin6_scope_id support
16 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
17 * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
18 * to a single port at the same time.
19 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
22 #include <linux/bottom_half.h>
23 #include <linux/module.h>
24 #include <linux/errno.h>
25 #include <linux/types.h>
26 #include <linux/socket.h>
27 #include <linux/sockios.h>
28 #include <linux/net.h>
29 #include <linux/jiffies.h>
31 #include <linux/in6.h>
32 #include <linux/netdevice.h>
33 #include <linux/init.h>
34 #include <linux/jhash.h>
35 #include <linux/ipsec.h>
36 #include <linux/times.h>
37 #include <linux/slab.h>
38 #include <linux/uaccess.h>
39 #include <linux/ipv6.h>
40 #include <linux/icmpv6.h>
41 #include <linux/random.h>
42 #include <linux/indirect_call_wrapper.h>
45 #include <net/ndisc.h>
46 #include <net/inet6_hashtables.h>
47 #include <net/inet6_connection_sock.h>
49 #include <net/transp_v6.h>
50 #include <net/addrconf.h>
51 #include <net/ip6_route.h>
52 #include <net/ip6_checksum.h>
53 #include <net/inet_ecn.h>
54 #include <net/protocol.h>
57 #include <net/dsfield.h>
58 #include <net/timewait_sock.h>
59 #include <net/inet_common.h>
60 #include <net/secure_seq.h>
61 #include <net/busy_poll.h>
63 #include <linux/proc_fs.h>
64 #include <linux/seq_file.h>
66 #include <crypto/hash.h>
67 #include <linux/scatterlist.h>
69 #include <trace/events/tcp.h>
71 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
72 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
73 struct request_sock *req);
75 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
77 static const struct inet_connection_sock_af_ops ipv6_mapped;
78 const struct inet_connection_sock_af_ops ipv6_specific;
79 #ifdef CONFIG_TCP_MD5SIG
80 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
81 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
83 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
84 const struct in6_addr *addr,
91 /* Helper returning the inet6 address from a given tcp socket.
92 * It can be used in TCP stack instead of inet6_sk(sk).
93 * This avoids a dereference and allows compiler optimizations.
94 * It is a specialized version of inet6_sk_generic().
96 static struct ipv6_pinfo *tcp_inet6_sk(const struct sock *sk)
98 unsigned int offset = sizeof(struct tcp6_sock) - sizeof(struct ipv6_pinfo);
100 return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
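/* Cache the received route on the socket so that the established-state fast
 * path in tcp_v6_do_rcv() can validate it via rx_dst_cookie and skip a fresh
 * route lookup.
 */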
103 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
105 struct dst_entry *dst = skb_dst(skb);
107 if (dst && dst_hold_safe(dst)) {
108 const struct rt6_info *rt = (const struct rt6_info *)dst;
111 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
112 tcp_inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
116 static u32 tcp_v6_init_seq(const struct sk_buff *skb)
118 return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
119 ipv6_hdr(skb)->saddr.s6_addr32,
121 tcp_hdr(skb)->source);
124 static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
126 return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
127 ipv6_hdr(skb)->saddr.s6_addr32);
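/* ->pre_connect() runs from __inet_stream_connect() with the socket lock
 * held, before ->connect(); it only sanity-checks the address length and
 * runs the cgroup BPF INET6 connect hook.
 */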
130 static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
133 /* This check is replicated from tcp_v6_connect() and is intended to
134 * prevent the BPF program called below from accessing bytes that are
135 * outside the bound specified by the user in addr_len.
137 if (addr_len < SIN6_LEN_RFC2133)
140 sock_owned_by_me(sk);
142 return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
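/* Active open: validate the destination, fall back to tcp_v4_connect() for
 * v4-mapped addresses, route the flow, choose a source address, then enter
 * SYN_SENT and transmit the SYN via tcp_connect().
 */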
145 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
148 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
149 struct inet_sock *inet = inet_sk(sk);
150 struct inet_connection_sock *icsk = inet_csk(sk);
151 struct ipv6_pinfo *np = tcp_inet6_sk(sk);
152 struct tcp_sock *tp = tcp_sk(sk);
153 struct in6_addr *saddr = NULL, *final_p, final;
154 struct ipv6_txoptions *opt;
156 struct dst_entry *dst;
159 struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
161 if (addr_len < SIN6_LEN_RFC2133)
164 if (usin->sin6_family != AF_INET6)
165 return -EAFNOSUPPORT;
167 memset(&fl6, 0, sizeof(fl6));
170 fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
171 IP6_ECN_flow_init(fl6.flowlabel);
172 if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
173 struct ip6_flowlabel *flowlabel;
174 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
175 if (IS_ERR(flowlabel))
177 fl6_sock_release(flowlabel);
182 * connect() to INADDR_ANY means loopback (BSD'ism).
185 if (ipv6_addr_any(&usin->sin6_addr)) {
186 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
187 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
190 usin->sin6_addr = in6addr_loopback;
193 addr_type = ipv6_addr_type(&usin->sin6_addr);
195 if (addr_type & IPV6_ADDR_MULTICAST)
198 if (addr_type & IPV6_ADDR_LINKLOCAL) {
199 if (addr_len >= sizeof(struct sockaddr_in6) &&
200 usin->sin6_scope_id) {
201 /* If interface is set while binding, indices
204 if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
207 sk->sk_bound_dev_if = usin->sin6_scope_id;
210 /* Connecting to a link-local address requires an interface */
211 if (!sk->sk_bound_dev_if)
215 if (tp->rx_opt.ts_recent_stamp &&
216 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
217 tp->rx_opt.ts_recent = 0;
218 tp->rx_opt.ts_recent_stamp = 0;
219 WRITE_ONCE(tp->write_seq, 0);
222 sk->sk_v6_daddr = usin->sin6_addr;
223 np->flow_label = fl6.flowlabel;
229 if (addr_type & IPV6_ADDR_MAPPED) {
230 u32 exthdrlen = icsk->icsk_ext_hdr_len;
231 struct sockaddr_in sin;
233 if (__ipv6_only_sock(sk))
236 sin.sin_family = AF_INET;
237 sin.sin_port = usin->sin6_port;
238 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
240 icsk->icsk_af_ops = &ipv6_mapped;
242 mptcpv6_handle_mapped(sk, true);
243 sk->sk_backlog_rcv = tcp_v4_do_rcv;
244 #ifdef CONFIG_TCP_MD5SIG
245 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
248 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
251 icsk->icsk_ext_hdr_len = exthdrlen;
252 icsk->icsk_af_ops = &ipv6_specific;
254 mptcpv6_handle_mapped(sk, false);
255 sk->sk_backlog_rcv = tcp_v6_do_rcv;
256 #ifdef CONFIG_TCP_MD5SIG
257 tp->af_specific = &tcp_sock_ipv6_specific;
261 np->saddr = sk->sk_v6_rcv_saddr;
266 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
267 saddr = &sk->sk_v6_rcv_saddr;
269 fl6.flowi6_proto = IPPROTO_TCP;
270 fl6.daddr = sk->sk_v6_daddr;
271 fl6.saddr = saddr ? *saddr : np->saddr;
272 fl6.flowi6_oif = sk->sk_bound_dev_if;
273 fl6.flowi6_mark = sk->sk_mark;
274 fl6.fl6_dport = usin->sin6_port;
275 fl6.fl6_sport = inet->inet_sport;
276 fl6.flowi6_uid = sk->sk_uid;
278 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
279 final_p = fl6_update_dst(&fl6, opt, &final);
281 security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
283 dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
291 sk->sk_v6_rcv_saddr = *saddr;
294 /* set the source address */
296 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
298 sk->sk_gso_type = SKB_GSO_TCPV6;
299 ip6_dst_store(sk, dst, NULL, NULL);
301 icsk->icsk_ext_hdr_len = 0;
303 icsk->icsk_ext_hdr_len = opt->opt_flen +
306 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
308 inet->inet_dport = usin->sin6_port;
310 tcp_set_state(sk, TCP_SYN_SENT);
311 err = inet6_hash_connect(tcp_death_row, sk);
317 if (likely(!tp->repair)) {
319 WRITE_ONCE(tp->write_seq,
320 secure_tcpv6_seq(np->saddr.s6_addr32,
321 sk->sk_v6_daddr.s6_addr32,
324 tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
326 sk->sk_v6_daddr.s6_addr32);
329 if (tcp_fastopen_defer_connect(sk, &err))
334 err = tcp_connect(sk);
341 tcp_set_state(sk, TCP_CLOSE);
343 inet->inet_dport = 0;
344 sk->sk_route_caps = 0;
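/* PMTU handler: called directly from tcp_v6_err() or deferred via
 * release_cb; re-syncs the MSS to the value stored in tp->mtu_info by an
 * ICMPV6_PKT_TOOBIG message.
 */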
348 static void tcp_v6_mtu_reduced(struct sock *sk)
350 struct dst_entry *dst;
353 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
356 mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
358 /* Drop requests trying to increase our current mss.
359 * Check done in __ip6_rt_update_pmtu() is too late.
361 if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache)
364 dst = inet6_csk_update_pmtu(sk, mtu);
368 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
369 tcp_sync_mss(sk, dst_mtu(dst));
370 tcp_simple_retransmit(sk);
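/* ICMPv6 error handler: locate the socket the offending segment belongs to
 * and handle redirects, PMTU updates and hard errors accordingly.
 */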
374 static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
375 u8 type, u8 code, int offset, __be32 info)
377 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
378 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
379 struct net *net = dev_net(skb->dev);
380 struct request_sock *fastopen;
381 struct ipv6_pinfo *np;
388 sk = __inet6_lookup_established(net, &tcp_hashinfo,
389 &hdr->daddr, th->dest,
390 &hdr->saddr, ntohs(th->source),
391 skb->dev->ifindex, inet6_sdif(skb));
394 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
399 if (sk->sk_state == TCP_TIME_WAIT) {
400 inet_twsk_put(inet_twsk(sk));
403 seq = ntohl(th->seq);
404 fatal = icmpv6_err_convert(type, code, &err);
405 if (sk->sk_state == TCP_NEW_SYN_RECV) {
406 tcp_req_err(sk, seq, fatal);
411 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
412 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
414 if (sk->sk_state == TCP_CLOSE)
417 if (ipv6_hdr(skb)->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
418 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
423 /* XXX (TFO) - tp->snd_una should be the ISN (see tcp_create_openreq_child()) */
424 fastopen = rcu_dereference(tp->fastopen_rsk);
425 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
426 if (sk->sk_state != TCP_LISTEN &&
427 !between(seq, snd_una, tp->snd_nxt)) {
428 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
432 np = tcp_inet6_sk(sk);
434 if (type == NDISC_REDIRECT) {
435 if (!sock_owned_by_user(sk)) {
436 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
439 dst->ops->redirect(dst, sk, skb);
444 if (type == ICMPV6_PKT_TOOBIG) {
445 u32 mtu = ntohl(info);
447 /* We are not interested in TCP_LISTEN and open_requests
448 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
449 * they should go through unfragmented).
451 if (sk->sk_state == TCP_LISTEN)
454 if (!ip6_sk_accept_pmtu(sk))
457 if (mtu < IPV6_MIN_MTU)
460 WRITE_ONCE(tp->mtu_info, mtu);
462 if (!sock_owned_by_user(sk))
463 tcp_v6_mtu_reduced(sk);
464 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
471 /* Might be for a request_sock */
472 switch (sk->sk_state) {
475 /* Only in fast or simultaneous open. If a fast open socket is
476 * already accepted it is treated as a connected one below.
478 if (fastopen && !fastopen->sk)
481 ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th);
483 if (!sock_owned_by_user(sk)) {
485 sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
489 sk->sk_err_soft = err;
494 /* check if this ICMP message allows revert of backoff.
497 if (!fastopen && type == ICMPV6_DEST_UNREACH &&
498 code == ICMPV6_NOROUTE)
499 tcp_ld_RTO_revert(sk, seq);
502 if (!sock_owned_by_user(sk) && np->recverr) {
506 sk->sk_err_soft = err;
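/* Build and transmit a SYN-ACK for the given request socket: grab a route if
 * needed, let tcp_make_synack() construct the segment, checksum it and send
 * it with ip6_xmit().
 */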
515 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
517 struct request_sock *req,
518 struct tcp_fastopen_cookie *foc,
519 enum tcp_synack_type synack_type,
520 struct sk_buff *syn_skb)
522 struct inet_request_sock *ireq = inet_rsk(req);
523 struct ipv6_pinfo *np = tcp_inet6_sk(sk);
524 struct ipv6_txoptions *opt;
525 struct flowi6 *fl6 = &fl->u.ip6;
530 /* First, grab a route. */
531 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
532 IPPROTO_TCP)) == NULL)
535 skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);
538 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
539 &ireq->ir_v6_rmt_addr);
541 fl6->daddr = ireq->ir_v6_rmt_addr;
542 if (np->repflow && ireq->pktopts)
543 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
545 tclass = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
546 (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
547 (np->tclass & INET_ECN_MASK) :
550 if (!INET_ECN_is_capable(tclass) &&
551 tcp_bpf_ca_needs_ecn((struct sock *)req))
552 tclass |= INET_ECN_ECT_0;
555 opt = ireq->ipv6_opt;
557 opt = rcu_dereference(np->opt);
558 err = ip6_xmit(sk, skb, fl6, skb->mark ? : sk->sk_mark, opt,
559 tclass, sk->sk_priority);
561 err = net_xmit_eval(err);
569 static void tcp_v6_reqsk_destructor(struct request_sock *req)
571 kfree(inet_rsk(req)->ipv6_opt);
572 kfree_skb(inet_rsk(req)->pktopts);
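/* TCP MD5 signature (RFC 2385) support: key lookup and hash computation for
 * IPv6 peers, including v4-mapped addresses.
 */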
575 #ifdef CONFIG_TCP_MD5SIG
576 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
577 const struct in6_addr *addr,
580 return tcp_md5_do_lookup(sk, l3index,
581 (union tcp_md5_addr *)addr, AF_INET6);
584 static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
585 const struct sock *addr_sk)
589 l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
590 addr_sk->sk_bound_dev_if);
591 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr,
595 static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
596 sockptr_t optval, int optlen)
598 struct tcp_md5sig cmd;
599 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
604 if (optlen < sizeof(cmd))
607 if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
610 if (sin6->sin6_family != AF_INET6)
613 flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
615 if (optname == TCP_MD5SIG_EXT &&
616 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
617 prefixlen = cmd.tcpm_prefixlen;
618 if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
622 prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
625 if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
626 cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
627 struct net_device *dev;
630 dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
631 if (dev && netif_is_l3_master(dev))
632 l3index = dev->ifindex;
635 /* It is OK to check whether dev/l3index were set outside of RCU;
636 * right now the device MUST be an L3 master.
638 if (!dev || !l3index)
642 if (!cmd.tcpm_keylen) {
643 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
644 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
647 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
648 AF_INET6, prefixlen, l3index, flags);
651 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
654 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
655 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
656 AF_INET, prefixlen, l3index, flags,
657 cmd.tcpm_key, cmd.tcpm_keylen,
660 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
661 AF_INET6, prefixlen, l3index, flags,
662 cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
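/* Feed the IPv6 pseudo-header and a copy of the TCP header into the per-CPU
 * MD5 ahash request; callers then mix in the key (and, for full segments,
 * the payload).
 */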
665 static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
666 const struct in6_addr *daddr,
667 const struct in6_addr *saddr,
668 const struct tcphdr *th, int nbytes)
670 struct tcp6_pseudohdr *bp;
671 struct scatterlist sg;
675 /* 1. TCP pseudo-header (RFC2460) */
678 bp->protocol = cpu_to_be32(IPPROTO_TCP);
679 bp->len = cpu_to_be32(nbytes);
681 _th = (struct tcphdr *)(bp + 1);
682 memcpy(_th, th, sizeof(*th));
685 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
686 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
687 sizeof(*bp) + sizeof(*th));
688 return crypto_ahash_update(hp->md5_req);
691 static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
692 const struct in6_addr *daddr, struct in6_addr *saddr,
693 const struct tcphdr *th)
695 struct tcp_md5sig_pool *hp;
696 struct ahash_request *req;
698 hp = tcp_get_md5sig_pool();
700 goto clear_hash_noput;
703 if (crypto_ahash_init(req))
705 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
707 if (tcp_md5_hash_key(hp, key))
709 ahash_request_set_crypt(req, NULL, md5_hash, 0);
710 if (crypto_ahash_final(req))
713 tcp_put_md5sig_pool();
717 tcp_put_md5sig_pool();
719 memset(md5_hash, 0, 16);
723 static int tcp_v6_md5_hash_skb(char *md5_hash,
724 const struct tcp_md5sig_key *key,
725 const struct sock *sk,
726 const struct sk_buff *skb)
728 const struct in6_addr *saddr, *daddr;
729 struct tcp_md5sig_pool *hp;
730 struct ahash_request *req;
731 const struct tcphdr *th = tcp_hdr(skb);
733 if (sk) { /* valid for established/request sockets */
734 saddr = &sk->sk_v6_rcv_saddr;
735 daddr = &sk->sk_v6_daddr;
737 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
738 saddr = &ip6h->saddr;
739 daddr = &ip6h->daddr;
742 hp = tcp_get_md5sig_pool();
744 goto clear_hash_noput;
747 if (crypto_ahash_init(req))
750 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
752 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
754 if (tcp_md5_hash_key(hp, key))
756 ahash_request_set_crypt(req, NULL, md5_hash, 0);
757 if (crypto_ahash_final(req))
760 tcp_put_md5sig_pool();
764 tcp_put_md5sig_pool();
766 memset(md5_hash, 0, 16);
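/* Verify the MD5 option of an incoming segment against the key expected for
 * the source address (and L3 domain); mismatches bump the TCPMD5* counters
 * and make the caller drop the segment.
 */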
772 static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
773 const struct sk_buff *skb,
776 #ifdef CONFIG_TCP_MD5SIG
777 const __u8 *hash_location = NULL;
778 struct tcp_md5sig_key *hash_expected;
779 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
780 const struct tcphdr *th = tcp_hdr(skb);
781 int genhash, l3index;
784 /* If sdif is set, the packet ingressed via a device
785 * in an L3 domain and dif is set to the l3mdev
787 l3index = sdif ? dif : 0;
789 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr, l3index);
790 hash_location = tcp_parse_md5sig_option(th);
792 /* We've parsed the options - do we have a hash? */
793 if (!hash_expected && !hash_location)
796 if (hash_expected && !hash_location) {
797 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
801 if (!hash_expected && hash_location) {
802 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
806 /* check the signature */
807 genhash = tcp_v6_md5_hash_skb(newhash,
811 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
812 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
813 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u L3 index %d\n",
814 genhash ? "failed" : "mismatch",
815 &ip6h->saddr, ntohs(th->source),
816 &ip6h->daddr, ntohs(th->dest), l3index);
823 static void tcp_v6_init_req(struct request_sock *req,
824 const struct sock *sk_listener,
827 bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
828 struct inet_request_sock *ireq = inet_rsk(req);
829 const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener);
831 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
832 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
834 /* So that link locals have meaning */
835 if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
836 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
837 ireq->ir_iif = tcp_v6_iif(skb);
839 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
840 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
841 np->rxopt.bits.rxinfo ||
842 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
843 np->rxopt.bits.rxohlim || np->repflow)) {
844 refcount_inc(&skb->users);
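/* ->route_req(): initialise the IPv6 part of the request sock, run the LSM
 * hook and return a route for the SYN-ACK.
 */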
849 static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
852 struct request_sock *req)
854 tcp_v6_init_req(req, sk, skb);
856 if (security_inet_conn_request(sk, skb, req))
859 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
862 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
864 .obj_size = sizeof(struct tcp6_request_sock),
865 .rtx_syn_ack = tcp_rtx_synack,
866 .send_ack = tcp_v6_reqsk_send_ack,
867 .destructor = tcp_v6_reqsk_destructor,
868 .send_reset = tcp_v6_send_reset,
869 .syn_ack_timeout = tcp_syn_ack_timeout,
872 const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
873 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
874 sizeof(struct ipv6hdr),
875 #ifdef CONFIG_TCP_MD5SIG
876 .req_md5_lookup = tcp_v6_md5_lookup,
877 .calc_md5_hash = tcp_v6_md5_hash_skb,
879 #ifdef CONFIG_SYN_COOKIES
880 .cookie_init_seq = cookie_v6_init_sequence,
882 .route_req = tcp_v6_route_req,
883 .init_seq = tcp_v6_init_seq,
884 .init_ts_off = tcp_v6_init_ts_off,
885 .send_synack = tcp_v6_send_synack,
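/* Common helper for RSTs and bare ACKs: builds a minimal TCP header (plus
 * timestamp/MD5/MPTCP-reset options as needed) and sends it from the per-net
 * control socket net->ipv6.tcp_sk.
 */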
888 static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
889 u32 ack, u32 win, u32 tsval, u32 tsecr,
890 int oif, struct tcp_md5sig_key *key, int rst,
891 u8 tclass, __be32 label, u32 priority)
893 const struct tcphdr *th = tcp_hdr(skb);
895 struct sk_buff *buff;
897 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
898 struct sock *ctl_sk = net->ipv6.tcp_sk;
899 unsigned int tot_len = sizeof(struct tcphdr);
900 __be32 mrst = 0, *topt;
901 struct dst_entry *dst;
905 tot_len += TCPOLEN_TSTAMP_ALIGNED;
906 #ifdef CONFIG_TCP_MD5SIG
908 tot_len += TCPOLEN_MD5SIG_ALIGNED;
913 mrst = mptcp_reset_option(skb);
916 tot_len += sizeof(__be32);
920 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
925 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
927 t1 = skb_push(buff, tot_len);
928 skb_reset_transport_header(buff);
930 /* Swap the send and the receive. */
931 memset(t1, 0, sizeof(*t1));
932 t1->dest = th->source;
933 t1->source = th->dest;
934 t1->doff = tot_len / 4;
935 t1->seq = htonl(seq);
936 t1->ack_seq = htonl(ack);
937 t1->ack = !rst || !th->ack;
939 t1->window = htons(win);
941 topt = (__be32 *)(t1 + 1);
944 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
945 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
946 *topt++ = htonl(tsval);
947 *topt++ = htonl(tsecr);
953 #ifdef CONFIG_TCP_MD5SIG
955 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
956 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
957 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
958 &ipv6_hdr(skb)->saddr,
959 &ipv6_hdr(skb)->daddr, t1);
963 memset(&fl6, 0, sizeof(fl6));
964 fl6.daddr = ipv6_hdr(skb)->saddr;
965 fl6.saddr = ipv6_hdr(skb)->daddr;
966 fl6.flowlabel = label;
968 buff->ip_summed = CHECKSUM_PARTIAL;
971 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
973 fl6.flowi6_proto = IPPROTO_TCP;
974 if (rt6_need_strict(&fl6.daddr) && !oif)
975 fl6.flowi6_oif = tcp_v6_iif(skb);
977 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
980 fl6.flowi6_oif = oif;
984 if (sk->sk_state == TCP_TIME_WAIT) {
985 mark = inet_twsk(sk)->tw_mark;
986 /* autoflowlabel relies on buff->hash */
987 skb_set_hash(buff, inet_twsk(sk)->tw_txhash,
992 buff->tstamp = tcp_transmit_time(sk);
994 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
995 fl6.fl6_dport = t1->dest;
996 fl6.fl6_sport = t1->source;
997 fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
998 security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));
1000 /* Pass a socket to ip6_dst_lookup even when it is for a RST.
1001 * The underlying function will use it to retrieve the network
1004 dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
1006 skb_dst_set(buff, dst);
1007 ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL,
1008 tclass & ~INET_ECN_MASK, priority);
1009 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
1011 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
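/* Send a RST in reply to an unacceptable segment. If the segment carried an
 * MD5 option for a connection we no longer know about, the key is recovered
 * via a listener lookup so the hash can be verified and the RST signed.
 */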
1018 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
1020 const struct tcphdr *th = tcp_hdr(skb);
1021 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
1022 u32 seq = 0, ack_seq = 0;
1023 struct tcp_md5sig_key *key = NULL;
1024 #ifdef CONFIG_TCP_MD5SIG
1025 const __u8 *hash_location = NULL;
1026 unsigned char newhash[16];
1028 struct sock *sk1 = NULL;
1038 /* If sk is not NULL, it means we did a successful lookup and the incoming
1039 * route had to be correct. The prequeue might have dropped our dst.
1041 if (!sk && !ipv6_unicast_destination(skb))
1044 net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
1045 #ifdef CONFIG_TCP_MD5SIG
1047 hash_location = tcp_parse_md5sig_option(th);
1048 if (sk && sk_fullsock(sk)) {
1051 /* If sdif is set, the packet ingressed via a device
1052 * in an L3 domain and inet_iif is set to it.
1054 l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
1055 key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr, l3index);
1056 } else if (hash_location) {
1057 int dif = tcp_v6_iif_l3_slave(skb);
1058 int sdif = tcp_v6_sdif(skb);
1062 * active side is lost. Try to find the listening socket through the
1063 * source port, and then find the md5 key through that listening socket.
1064 * We do not lose any security here:
1065 * the incoming packet is checked against the md5 hash of the key we find,
1066 * and no RST is generated if the md5 hash doesn't match.
1068 sk1 = inet6_lookup_listener(net,
1069 &tcp_hashinfo, NULL, 0,
1071 th->source, &ipv6h->daddr,
1072 ntohs(th->source), dif, sdif);
1076 /* If sdif is set, the packet ingressed via a device
1077 * in an L3 domain and dif is set to it.
1079 l3index = tcp_v6_sdif(skb) ? dif : 0;
1081 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr, l3index);
1085 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
1086 if (genhash || memcmp(hash_location, newhash, 16) != 0)
1092 seq = ntohl(th->ack_seq);
1094 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
1098 oif = sk->sk_bound_dev_if;
1099 if (sk_fullsock(sk)) {
1100 const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
1102 trace_tcp_send_reset(sk, skb);
1104 label = ip6_flowlabel(ipv6h);
1105 priority = sk->sk_priority;
1107 if (sk->sk_state == TCP_TIME_WAIT) {
1108 label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel);
1109 priority = inet_twsk(sk)->tw_priority;
1112 if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_TCP_RESET)
1113 label = ip6_flowlabel(ipv6h);
1116 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1,
1117 ipv6_get_dsfield(ipv6h), label, priority);
1119 #ifdef CONFIG_TCP_MD5SIG
1125 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
1126 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
1127 struct tcp_md5sig_key *key, u8 tclass,
1128 __be32 label, u32 priority)
1130 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
1131 tclass, label, priority);
1134 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1136 struct inet_timewait_sock *tw = inet_twsk(sk);
1137 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1139 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1140 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1141 tcp_time_stamp_raw() + tcptw->tw_ts_offset,
1142 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
1143 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority);
1148 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
1149 struct request_sock *req)
1153 l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
1155 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
1156 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
1159 * The window field (SEG.WND) of every outgoing segment, with the
1160 * exception of <SYN> segments, MUST be right-shifted by
1161 * Rcv.Wind.Shift bits:
1163 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
1164 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
1165 tcp_rsk(req)->rcv_nxt,
1166 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
1167 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
1168 req->ts_recent, sk->sk_bound_dev_if,
1169 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index),
1170 ipv6_get_dsfield(ipv6_hdr(skb)), 0, sk->sk_priority);
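/* When SYN cookies are in use, ACK segments that reach a listening socket
 * without a matching request sock are validated via cookie_v6_check().
 */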
1174 static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
1176 #ifdef CONFIG_SYN_COOKIES
1177 const struct tcphdr *th = tcp_hdr(skb);
1180 sk = cookie_v6_check(sk, skb);
1185 u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
1186 struct tcphdr *th, u32 *cookie)
1189 #ifdef CONFIG_SYN_COOKIES
1190 mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops,
1191 &tcp_request_sock_ipv6_ops, sk, th);
1193 *cookie = __cookie_v6_init_sequence(iph, th, &mss);
1194 tcp_synq_overflow(sk);
1200 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1202 if (skb->protocol == htons(ETH_P_IP))
1203 return tcp_v4_conn_request(sk, skb);
1205 if (!ipv6_unicast_destination(skb))
1208 if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
1209 __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
1213 return tcp_conn_request(&tcp6_request_sock_ops,
1214 &tcp_request_sock_ipv6_ops, sk, skb);
1218 return 0; /* don't send reset */
1221 static void tcp_v6_restore_cb(struct sk_buff *skb)
1223 /* We need to move header back to the beginning if xfrm6_policy_check()
1224 * and tcp_v6_fill_cb() are going to be called again.
1225 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1227 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1228 sizeof(struct inet6_skb_parm));
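/* Create the child socket once the 3WHS completes. The ETH_P_IP branch
 * handles v4-mapped peers by delegating to tcp_v4_syn_recv_sock() and then
 * pointing the child at the mapped (IPv4) af_ops.
 */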
1231 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1232 struct request_sock *req,
1233 struct dst_entry *dst,
1234 struct request_sock *req_unhash,
1237 struct inet_request_sock *ireq;
1238 struct ipv6_pinfo *newnp;
1239 const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
1240 struct ipv6_txoptions *opt;
1241 struct inet_sock *newinet;
1242 bool found_dup_sk = false;
1243 struct tcp_sock *newtp;
1245 #ifdef CONFIG_TCP_MD5SIG
1246 struct tcp_md5sig_key *key;
1251 if (skb->protocol == htons(ETH_P_IP)) {
1256 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1257 req_unhash, own_req);
1262 inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
1264 newinet = inet_sk(newsk);
1265 newnp = tcp_inet6_sk(newsk);
1266 newtp = tcp_sk(newsk);
1268 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1270 newnp->saddr = newsk->sk_v6_rcv_saddr;
1272 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1273 if (sk_is_mptcp(newsk))
1274 mptcpv6_handle_mapped(newsk, true);
1275 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1276 #ifdef CONFIG_TCP_MD5SIG
1277 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1280 newnp->ipv6_mc_list = NULL;
1281 newnp->ipv6_ac_list = NULL;
1282 newnp->ipv6_fl_list = NULL;
1283 newnp->pktoptions = NULL;
1285 newnp->mcast_oif = inet_iif(skb);
1286 newnp->mcast_hops = ip_hdr(skb)->ttl;
1287 newnp->rcv_flowinfo = 0;
1289 newnp->flow_label = 0;
1292 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1293 * here, tcp_create_openreq_child now does this for us, see the comment in
1294 * that function for the gory details. -acme
1297 /* This is a tricky place. Until this moment the IPv4 tcp code
1298 worked with the IPv6 icsk.icsk_af_ops.
1301 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1306 ireq = inet_rsk(req);
1308 if (sk_acceptq_is_full(sk))
1312 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1317 newsk = tcp_create_openreq_child(sk, req, skb);
1322 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1323 * count here, tcp_create_openreq_child now does this for us, see the
1324 * comment in that function for the gory details. -acme
1327 newsk->sk_gso_type = SKB_GSO_TCPV6;
1328 ip6_dst_store(newsk, dst, NULL, NULL);
1329 inet6_sk_rx_dst_set(newsk, skb);
1331 inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
1333 newtp = tcp_sk(newsk);
1334 newinet = inet_sk(newsk);
1335 newnp = tcp_inet6_sk(newsk);
1337 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1339 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1340 newnp->saddr = ireq->ir_v6_loc_addr;
1341 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1342 newsk->sk_bound_dev_if = ireq->ir_iif;
1344 /* Now IPv6 options...
1346 First: no IPv4 options.
1348 newinet->inet_opt = NULL;
1349 newnp->ipv6_mc_list = NULL;
1350 newnp->ipv6_ac_list = NULL;
1351 newnp->ipv6_fl_list = NULL;
1354 newnp->rxopt.all = np->rxopt.all;
1356 newnp->pktoptions = NULL;
1358 newnp->mcast_oif = tcp_v6_iif(skb);
1359 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1360 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1362 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1364 /* Set ToS of the new socket based upon the value of incoming SYN.
1365 * ECT bits are set later in tcp_init_transfer().
1367 if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
1368 newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
1370 /* Clone native IPv6 options from listening socket (if any)
1372 Yes, keeping a reference count would be much more clever, but we
1373 do one more thing here: we reattach the optmem
1376 opt = ireq->ipv6_opt;
1378 opt = rcu_dereference(np->opt);
1380 opt = ipv6_dup_options(newsk, opt);
1381 RCU_INIT_POINTER(newnp->opt, opt);
1383 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1385 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1388 tcp_ca_openreq_child(newsk, dst);
1390 tcp_sync_mss(newsk, dst_mtu(dst));
1391 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1393 tcp_initialize_rcv_mss(newsk);
1395 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1396 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1398 #ifdef CONFIG_TCP_MD5SIG
1399 l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
1401 /* Copy over the MD5 key from the original socket */
1402 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr, l3index);
1404 /* We're using one, so create a matching key
1405 * on the newsk structure. If we fail to get
1406 * memory, then we end up not copying the key
1409 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1410 AF_INET6, 128, l3index, key->flags, key->key, key->keylen,
1411 sk_gfp_mask(sk, GFP_ATOMIC));
1415 if (__inet_inherit_port(sk, newsk) < 0) {
1416 inet_csk_prepare_forced_close(newsk);
1420 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
1423 tcp_move_syn(newtp, req);
1425 /* Clone pktoptions received with SYN, if we own the req */
1426 if (ireq->pktopts) {
1427 newnp->pktoptions = skb_clone(ireq->pktopts,
1428 sk_gfp_mask(sk, GFP_ATOMIC));
1429 consume_skb(ireq->pktopts);
1430 ireq->pktopts = NULL;
1431 if (newnp->pktoptions) {
1432 tcp_v6_restore_cb(newnp->pktoptions);
1433 skb_set_owner_r(newnp->pktoptions, newsk);
1437 if (!req_unhash && found_dup_sk) {
1438 /* This code path should be executed only in the
1439 * syncookie case
1441 bh_unlock_sock(newsk);
1450 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1458 INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
1460 /* The socket must have its spinlock held when we get
1461 * here, unless it is a TCP_LISTEN socket.
1463 * We have a potential double-lock case here, so even when
1464 * doing backlog processing we use the BH locking scheme.
1465 * This is because we cannot sleep with the original spinlock
1468 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1470 struct ipv6_pinfo *np = tcp_inet6_sk(sk);
1471 struct sk_buff *opt_skb = NULL;
1472 struct tcp_sock *tp;
1474 /* Imagine: the socket is IPv6. An IPv4 packet arrives,
1475 goes to the IPv4 receive handler and is backlogged.
1476 From the backlog it always goes here. Kerboom...
1477 Fortunately, tcp_rcv_established and rcv_established
1478 handle them correctly, but that is not the case with
1479 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1482 if (skb->protocol == htons(ETH_P_IP))
1483 return tcp_v4_do_rcv(sk, skb);
1486 * socket locking is here for SMP purposes as backlog rcv
1487 * is currently called with bh processing disabled.
1490 /* Do Stevens' IPV6_PKTOPTIONS.
1492 Yes, guys, it is the only place in our code where we
1493 can do this without affecting IPv4.
1494 The rest of the code is protocol independent,
1495 and I do not like the idea of uglifying IPv4.
1497 Actually, the whole idea behind IPV6_PKTOPTIONS
1498 does not look very well thought out. For now we latch
1499 the options received in the last packet enqueued
1500 by tcp. Feel free to propose a better solution.
1504 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1506 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1507 struct dst_entry *dst = sk->sk_rx_dst;
1509 sock_rps_save_rxhash(sk, skb);
1510 sk_mark_napi_id(sk, skb);
1512 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1513 INDIRECT_CALL_1(dst->ops->check, ip6_dst_check,
1514 dst, np->rx_dst_cookie) == NULL) {
1516 sk->sk_rx_dst = NULL;
1520 tcp_rcv_established(sk, skb);
1522 goto ipv6_pktoptions;
1526 if (tcp_checksum_complete(skb))
1529 if (sk->sk_state == TCP_LISTEN) {
1530 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1536 if (tcp_child_process(sk, nsk, skb))
1539 __kfree_skb(opt_skb);
1543 sock_rps_save_rxhash(sk, skb);
1545 if (tcp_rcv_state_process(sk, skb))
1548 goto ipv6_pktoptions;
1552 tcp_v6_send_reset(sk, skb);
1555 __kfree_skb(opt_skb);
1559 trace_tcp_bad_csum(skb);
1560 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1561 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1566 /* You may ask, what is this?
1568 1. skb was enqueued by tcp.
1569 2. skb is added to the tail of the read queue, rather than out of order.
1570 3. The socket is not in a passive state.
1571 4. Finally, it really contains options which the user wants to receive.
1574 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1575 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1576 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1577 np->mcast_oif = tcp_v6_iif(opt_skb);
1578 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1579 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1580 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1581 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1583 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1584 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1585 skb_set_owner_r(opt_skb, sk);
1586 tcp_v6_restore_cb(opt_skb);
1587 opt_skb = xchg(&np->pktoptions, opt_skb);
1589 __kfree_skb(opt_skb);
1590 opt_skb = xchg(&np->pktoptions, NULL);
1598 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1599 const struct tcphdr *th)
1601 /* This is tricky: we move IP6CB at its correct location into
1602 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1603 * _decode_session6() uses IP6CB().
1604 * barrier() makes sure compiler won't play aliasing games.
1606 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1607 sizeof(struct inet6_skb_parm));
1610 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1611 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1612 skb->len - th->doff*4);
1613 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1614 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1615 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1616 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1617 TCP_SKB_CB(skb)->sacked = 0;
1618 TCP_SKB_CB(skb)->has_rxtstamp =
1619 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
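/* Softirq entry point for IPv6 TCP: validate the header, look the socket up
 * in the established/listener hashes and hand the segment to the state
 * machine, the backlog, or the TIME_WAIT / NEW_SYN_RECV handlers.
 */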
1622 INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
1624 struct sk_buff *skb_to_free;
1625 int sdif = inet6_sdif(skb);
1626 int dif = inet6_iif(skb);
1627 const struct tcphdr *th;
1628 const struct ipv6hdr *hdr;
1632 struct net *net = dev_net(skb->dev);
1634 if (skb->pkt_type != PACKET_HOST)
1638 * Count it even if it's bad.
1640 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1642 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1645 th = (const struct tcphdr *)skb->data;
1647 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1649 if (!pskb_may_pull(skb, th->doff*4))
1652 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1655 th = (const struct tcphdr *)skb->data;
1656 hdr = ipv6_hdr(skb);
1659 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1660 th->source, th->dest, inet6_iif(skb), sdif,
1666 if (sk->sk_state == TCP_TIME_WAIT)
1669 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1670 struct request_sock *req = inet_reqsk(sk);
1671 bool req_stolen = false;
1674 sk = req->rsk_listener;
1675 if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif)) {
1676 sk_drops_add(sk, skb);
1680 if (tcp_checksum_complete(skb)) {
1684 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1685 nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb);
1687 inet_csk_reqsk_queue_drop_and_put(sk, req);
1691 /* reuseport_migrate_sock() has already held one sk_refcnt
1699 if (!tcp_filter(sk, skb)) {
1700 th = (const struct tcphdr *)skb->data;
1701 hdr = ipv6_hdr(skb);
1702 tcp_v6_fill_cb(skb, hdr, th);
1703 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
1708 /* Another cpu got exclusive access to req
1709 * and created a full blown socket.
1710 * Try to feed this packet to this socket
1711 * instead of discarding it.
1713 tcp_v6_restore_cb(skb);
1717 goto discard_and_relse;
1721 tcp_v6_restore_cb(skb);
1722 } else if (tcp_child_process(sk, nsk, skb)) {
1723 tcp_v6_send_reset(nsk, skb);
1724 goto discard_and_relse;
1730 if (hdr->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
1731 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1732 goto discard_and_relse;
1735 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1736 goto discard_and_relse;
1738 if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif))
1739 goto discard_and_relse;
1741 if (tcp_filter(sk, skb))
1742 goto discard_and_relse;
1743 th = (const struct tcphdr *)skb->data;
1744 hdr = ipv6_hdr(skb);
1745 tcp_v6_fill_cb(skb, hdr, th);
1749 if (sk->sk_state == TCP_LISTEN) {
1750 ret = tcp_v6_do_rcv(sk, skb);
1751 goto put_and_return;
1754 sk_incoming_cpu_update(sk);
1756 bh_lock_sock_nested(sk);
1757 tcp_segs_in(tcp_sk(sk), skb);
1759 if (!sock_owned_by_user(sk)) {
1760 skb_to_free = sk->sk_rx_skb_cache;
1761 sk->sk_rx_skb_cache = NULL;
1762 ret = tcp_v6_do_rcv(sk, skb);
1764 if (tcp_add_backlog(sk, skb))
1765 goto discard_and_relse;
1770 __kfree_skb(skb_to_free);
1774 return ret ? -1 : 0;
1777 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1780 tcp_v6_fill_cb(skb, hdr, th);
1782 if (tcp_checksum_complete(skb)) {
1784 trace_tcp_bad_csum(skb);
1785 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1787 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1789 tcp_v6_send_reset(NULL, skb);
1797 sk_drops_add(sk, skb);
1803 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1804 inet_twsk_put(inet_twsk(sk));
1808 tcp_v6_fill_cb(skb, hdr, th);
1810 if (tcp_checksum_complete(skb)) {
1811 inet_twsk_put(inet_twsk(sk));
1815 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1820 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1821 skb, __tcp_hdrlen(th),
1822 &ipv6_hdr(skb)->saddr, th->source,
1823 &ipv6_hdr(skb)->daddr,
1825 tcp_v6_iif_l3_slave(skb),
1828 struct inet_timewait_sock *tw = inet_twsk(sk);
1829 inet_twsk_deschedule_put(tw);
1831 tcp_v6_restore_cb(skb);
1839 tcp_v6_timewait_ack(sk, skb);
1842 tcp_v6_send_reset(sk, skb);
1843 inet_twsk_deschedule_put(inet_twsk(sk));
1845 case TCP_TW_SUCCESS:
1851 INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
1853 const struct ipv6hdr *hdr;
1854 const struct tcphdr *th;
1857 if (skb->pkt_type != PACKET_HOST)
1860 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1863 hdr = ipv6_hdr(skb);
1866 if (th->doff < sizeof(struct tcphdr) / 4)
1869 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
1870 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1871 &hdr->saddr, th->source,
1872 &hdr->daddr, ntohs(th->dest),
1873 inet6_iif(skb), inet6_sdif(skb));
1876 skb->destructor = sock_edemux;
1877 if (sk_fullsock(sk)) {
1878 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1881 dst = dst_check(dst, tcp_inet6_sk(sk)->rx_dst_cookie);
1883 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1884 skb_dst_set_noref(skb, dst);
1889 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1890 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1891 .twsk_unique = tcp_twsk_unique,
1892 .twsk_destructor = tcp_twsk_destructor,
1895 INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
1897 struct ipv6_pinfo *np = inet6_sk(sk);
1899 __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr);
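/* AF-specific connection ops for genuine IPv6 peers; ipv6_mapped below
 * replaces them when the socket is connected to a v4-mapped address.
 */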
1902 const struct inet_connection_sock_af_ops ipv6_specific = {
1903 .queue_xmit = inet6_csk_xmit,
1904 .send_check = tcp_v6_send_check,
1905 .rebuild_header = inet6_sk_rebuild_header,
1906 .sk_rx_dst_set = inet6_sk_rx_dst_set,
1907 .conn_request = tcp_v6_conn_request,
1908 .syn_recv_sock = tcp_v6_syn_recv_sock,
1909 .net_header_len = sizeof(struct ipv6hdr),
1910 .net_frag_header_len = sizeof(struct frag_hdr),
1911 .setsockopt = ipv6_setsockopt,
1912 .getsockopt = ipv6_getsockopt,
1913 .addr2sockaddr = inet6_csk_addr2sockaddr,
1914 .sockaddr_len = sizeof(struct sockaddr_in6),
1915 .mtu_reduced = tcp_v6_mtu_reduced,
1918 #ifdef CONFIG_TCP_MD5SIG
1919 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1920 .md5_lookup = tcp_v6_md5_lookup,
1921 .calc_md5_hash = tcp_v6_md5_hash_skb,
1922 .md5_parse = tcp_v6_parse_md5_keys,
1927 * TCP over IPv4 via INET6 API
1929 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1930 .queue_xmit = ip_queue_xmit,
1931 .send_check = tcp_v4_send_check,
1932 .rebuild_header = inet_sk_rebuild_header,
1933 .sk_rx_dst_set = inet_sk_rx_dst_set,
1934 .conn_request = tcp_v6_conn_request,
1935 .syn_recv_sock = tcp_v6_syn_recv_sock,
1936 .net_header_len = sizeof(struct iphdr),
1937 .setsockopt = ipv6_setsockopt,
1938 .getsockopt = ipv6_getsockopt,
1939 .addr2sockaddr = inet6_csk_addr2sockaddr,
1940 .sockaddr_len = sizeof(struct sockaddr_in6),
1941 .mtu_reduced = tcp_v4_mtu_reduced,
1944 #ifdef CONFIG_TCP_MD5SIG
1945 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1946 .md5_lookup = tcp_v4_md5_lookup,
1947 .calc_md5_hash = tcp_v4_md5_hash_skb,
1948 .md5_parse = tcp_v6_parse_md5_keys,
1952 /* NOTE: A lot of things are set to zero explicitly by the call to
1953 * sk_alloc(), so they need not be done here.
1955 static int tcp_v6_init_sock(struct sock *sk)
1957 struct inet_connection_sock *icsk = inet_csk(sk);
1961 icsk->icsk_af_ops = &ipv6_specific;
1963 #ifdef CONFIG_TCP_MD5SIG
1964 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1970 static void tcp_v6_destroy_sock(struct sock *sk)
1972 tcp_v4_destroy_sock(sk);
1973 inet6_destroy_sock(sk);
1976 #ifdef CONFIG_PROC_FS
1977 /* Proc filesystem TCPv6 sock list dumping. */
1978 static void get_openreq6(struct seq_file *seq,
1979 const struct request_sock *req, int i)
1981 long ttd = req->rsk_timer.expires - jiffies;
1982 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1983 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1989 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1990 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1992 src->s6_addr32[0], src->s6_addr32[1],
1993 src->s6_addr32[2], src->s6_addr32[3],
1994 inet_rsk(req)->ir_num,
1995 dest->s6_addr32[0], dest->s6_addr32[1],
1996 dest->s6_addr32[2], dest->s6_addr32[3],
1997 ntohs(inet_rsk(req)->ir_rmt_port),
1999 0, 0, /* could print option size, but that is af dependent. */
2000 1, /* timers active (only the expire timer) */
2001 jiffies_to_clock_t(ttd),
2003 from_kuid_munged(seq_user_ns(seq),
2004 sock_i_uid(req->rsk_listener)),
2005 0, /* non standard timer */
2006 0, /* open_requests have no inode */
2010 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2012 const struct in6_addr *dest, *src;
2015 unsigned long timer_expires;
2016 const struct inet_sock *inet = inet_sk(sp);
2017 const struct tcp_sock *tp = tcp_sk(sp);
2018 const struct inet_connection_sock *icsk = inet_csk(sp);
2019 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2023 dest = &sp->sk_v6_daddr;
2024 src = &sp->sk_v6_rcv_saddr;
2025 destp = ntohs(inet->inet_dport);
2026 srcp = ntohs(inet->inet_sport);
2028 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2029 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2030 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2032 timer_expires = icsk->icsk_timeout;
2033 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2035 timer_expires = icsk->icsk_timeout;
2036 } else if (timer_pending(&sp->sk_timer)) {
2038 timer_expires = sp->sk_timer.expires;
2041 timer_expires = jiffies;
2044 state = inet_sk_state_load(sp);
2045 if (state == TCP_LISTEN)
2046 rx_queue = READ_ONCE(sp->sk_ack_backlog);
2048 /* Because we don't lock the socket,
2049 * we might find a transient negative value.
2051 rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
2052 READ_ONCE(tp->copied_seq), 0);
2055 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2056 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
2058 src->s6_addr32[0], src->s6_addr32[1],
2059 src->s6_addr32[2], src->s6_addr32[3], srcp,
2060 dest->s6_addr32[0], dest->s6_addr32[1],
2061 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2063 READ_ONCE(tp->write_seq) - tp->snd_una,
2066 jiffies_delta_to_clock_t(timer_expires - jiffies),
2067 icsk->icsk_retransmits,
2068 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
2069 icsk->icsk_probes_out,
2071 refcount_read(&sp->sk_refcnt), sp,
2072 jiffies_to_clock_t(icsk->icsk_rto),
2073 jiffies_to_clock_t(icsk->icsk_ack.ato),
2074 (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
2076 state == TCP_LISTEN ?
2077 fastopenq->max_qlen :
2078 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
2082 static void get_timewait6_sock(struct seq_file *seq,
2083 struct inet_timewait_sock *tw, int i)
2085 long delta = tw->tw_timer.expires - jiffies;
2086 const struct in6_addr *dest, *src;
2089 dest = &tw->tw_v6_daddr;
2090 src = &tw->tw_v6_rcv_saddr;
2091 destp = ntohs(tw->tw_dport);
2092 srcp = ntohs(tw->tw_sport);
2095 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2096 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2098 src->s6_addr32[0], src->s6_addr32[1],
2099 src->s6_addr32[2], src->s6_addr32[3], srcp,
2100 dest->s6_addr32[0], dest->s6_addr32[1],
2101 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2102 tw->tw_substate, 0, 0,
2103 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2104 refcount_read(&tw->tw_refcnt), tw);
2107 static int tcp6_seq_show(struct seq_file *seq, void *v)
2109 struct tcp_iter_state *st;
2110 struct sock *sk = v;
2112 if (v == SEQ_START_TOKEN) {
2117 "st tx_queue rx_queue tr tm->when retrnsmt"
2118 " uid timeout inode\n");
2123 if (sk->sk_state == TCP_TIME_WAIT)
2124 get_timewait6_sock(seq, v, st->num);
2125 else if (sk->sk_state == TCP_NEW_SYN_RECV)
2126 get_openreq6(seq, v, st->num);
2128 get_tcp6_sock(seq, v, st->num);
2133 static const struct seq_operations tcp6_seq_ops = {
2134 .show = tcp6_seq_show,
2135 .start = tcp_seq_start,
2136 .next = tcp_seq_next,
2137 .stop = tcp_seq_stop,
2140 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2144 int __net_init tcp6_proc_init(struct net *net)
2146 if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
2147 sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
2152 void tcp6_proc_exit(struct net *net)
2154 remove_proc_entry("tcp6", net->proc_net);
2158 struct proto tcpv6_prot = {
2160 .owner = THIS_MODULE,
2162 .pre_connect = tcp_v6_pre_connect,
2163 .connect = tcp_v6_connect,
2164 .disconnect = tcp_disconnect,
2165 .accept = inet_csk_accept,
2167 .init = tcp_v6_init_sock,
2168 .destroy = tcp_v6_destroy_sock,
2169 .shutdown = tcp_shutdown,
2170 .setsockopt = tcp_setsockopt,
2171 .getsockopt = tcp_getsockopt,
2172 .bpf_bypass_getsockopt = tcp_bpf_bypass_getsockopt,
2173 .keepalive = tcp_set_keepalive,
2174 .recvmsg = tcp_recvmsg,
2175 .sendmsg = tcp_sendmsg,
2176 .sendpage = tcp_sendpage,
2177 .backlog_rcv = tcp_v6_do_rcv,
2178 .release_cb = tcp_release_cb,
2180 .unhash = inet_unhash,
2181 .get_port = inet_csk_get_port,
2182 #ifdef CONFIG_BPF_SYSCALL
2183 .psock_update_sk_prot = tcp_bpf_update_proto,
2185 .enter_memory_pressure = tcp_enter_memory_pressure,
2186 .leave_memory_pressure = tcp_leave_memory_pressure,
2187 .stream_memory_free = tcp_stream_memory_free,
2188 .sockets_allocated = &tcp_sockets_allocated,
2189 .memory_allocated = &tcp_memory_allocated,
2190 .memory_pressure = &tcp_memory_pressure,
2191 .orphan_count = &tcp_orphan_count,
2192 .sysctl_mem = sysctl_tcp_mem,
2193 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
2194 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
2195 .max_header = MAX_TCP_HEADER,
2196 .obj_size = sizeof(struct tcp6_sock),
2197 .slab_flags = SLAB_TYPESAFE_BY_RCU,
2198 .twsk_prot = &tcp6_timewait_sock_ops,
2199 .rsk_prot = &tcp6_request_sock_ops,
2200 .h.hashinfo = &tcp_hashinfo,
2201 .no_autobind = true,
2202 .diag_destroy = tcp_abort,
2204 EXPORT_SYMBOL_GPL(tcpv6_prot);
2206 /* thinking of making this const? Don't.
2207 * early_demux can change based on sysctl.
2209 static struct inet6_protocol tcpv6_protocol = {
2210 .early_demux = tcp_v6_early_demux,
2211 .early_demux_handler = tcp_v6_early_demux,
2212 .handler = tcp_v6_rcv,
2213 .err_handler = tcp_v6_err,
2214 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2217 static struct inet_protosw tcpv6_protosw = {
2218 .type = SOCK_STREAM,
2219 .protocol = IPPROTO_TCP,
2220 .prot = &tcpv6_prot,
2221 .ops = &inet6_stream_ops,
2222 .flags = INET_PROTOSW_PERMANENT |
2226 static int __net_init tcpv6_net_init(struct net *net)
2228 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2229 SOCK_RAW, IPPROTO_TCP, net);
2232 static void __net_exit tcpv6_net_exit(struct net *net)
2234 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2237 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2239 inet_twsk_purge(&tcp_hashinfo, AF_INET6);
2242 static struct pernet_operations tcpv6_net_ops = {
2243 .init = tcpv6_net_init,
2244 .exit = tcpv6_net_exit,
2245 .exit_batch = tcpv6_net_exit_batch,
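/* Module init: register the IPv6 TCP protocol handler, the SOCK_STREAM
 * protosw entry, the per-netns control socket and MPTCP-over-IPv6.
 */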
2248 int __init tcpv6_init(void)
2252 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2256 /* register inet6 protocol */
2257 ret = inet6_register_protosw(&tcpv6_protosw);
2259 goto out_tcpv6_protocol;
2261 ret = register_pernet_subsys(&tcpv6_net_ops);
2263 goto out_tcpv6_protosw;
2265 ret = mptcpv6_init();
2267 goto out_tcpv6_pernet_subsys;
2272 out_tcpv6_pernet_subsys:
2273 unregister_pernet_subsys(&tcpv6_net_ops);
2275 inet6_unregister_protosw(&tcpv6_protosw);
2277 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2281 void tcpv6_exit(void)
2283 unregister_pernet_subsys(&tcpv6_net_ops);
2284 inet6_unregister_protosw(&tcpv6_protosw);
2285 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);