/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
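/* tcp_v6_connect() below implements the active-open path: it validates the
 * destination sockaddr, honours any flow label supplied by the caller,
 * diverts v4-mapped destinations to tcp_v4_connect(), routes the flow to
 * pick a source address, hashes the socket into the established table and
 * finally sends the SYN via tcp_connect().
 */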
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;

			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If an interface was set while binding, the
			 * indices must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
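/* PMTU handling: tcp_v6_mtu_reduced() runs either directly from the ICMPv6
 * error handler when the socket is not owned by a user context, or deferred
 * via the TCP_MTU_REDUCED_DEFERRED flag and tcp_release_cb() when it is.
 */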
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);
	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always <576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */
			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->ipv6_opt);
	kfree_skb(inet_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
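/* For reference, a minimal userspace sketch of the setsockopt() interface
 * that tcp_v6_parse_md5_keys() services; the peer address and key below are
 * placeholders, and error handling is omitted:
 *
 *	struct tcp_md5sig md5 = {};
 *	struct sockaddr_in6 *a6 = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	a6->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a6->sin6_addr);
 *	md5.tcpm_keylen = 6;
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */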
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}
static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);

		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}
static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
};
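/* tcp_v6_send_response() below builds and transmits a bare TCP segment (an
 * RST or an ACK) on the per-netns control socket, so it can answer segments
 * that have no usable socket context of their own. tcp_v6_send_reset() and
 * tcp_v6_send_ack() are thin wrappers around it.
 */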
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup_flow even when the reply is a RST;
	 * the underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and the incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/* The state of the socket that carried the MD5 key on the
		 * active side is lost. Try to find the listening socket through
		 * source port, and then find the md5 key through the listening
		 * socket. We are not losing security here:
		 * the incoming packet is checked against the md5 hash with the
		 * found key; no RST is generated if the md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, NULL, 0,
					    &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}
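/* Note: cookie_v6_check() below is only consulted for non-SYN segments,
 * i.e. the returning ACK of a connection whose request may have been
 * encoded into a SYN cookie while the listener was under SYN flood.
 */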
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}
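/* tcp_v6_syn_recv_sock() below creates the child socket once the handshake
 * completes. The first branch handles v4-mapped traffic arriving on an
 * AF_INET6 listener by delegating to tcp_v4_syn_recv_sock() and then
 * repointing the child's af_ops at the mapped variants.
 */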
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = tcp_v6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX options */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: reattach optmem to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions)
				skb_set_owner_r(newnp->pktoptions, newsk);
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but that is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose a better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* You may ask, what is all this for?

	   1. skb was enqueued by tcp.
	   2. skb is added to the tail of the read queue, not out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which the user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure the compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}
static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move the header back to the beginning if
	 * xfrm6_policy_check() and tcp_v6_fill_cb() are going to be
	 * called again.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}
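/* tcp_v6_rcv() is the protocol handler registered for IPPROTO_TCP: it
 * validates and checksums the header, demultiplexes the segment to an
 * established, request (TCP_NEW_SYN_RECV) or TIME_WAIT socket, and either
 * processes it directly, queues it to the backlog of a user-owned socket,
 * or answers socketless segments with a reset.
 */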
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	bool refcounted;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb),
				&refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		tcp_v6_fill_cb(skb, hdr, th);
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		__NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);

			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
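/* Early demux above is an optimisation run from the IPv6 input path before
 * routing: for established sockets it attaches the socket and, when the
 * cached cookie still validates, the cached rx dst to the skb, sparing the
 * full lookup in tcp_v6_rcv().
 */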
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = sp->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   tp->write_seq - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}
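/* The output format mirrors the IPv4 /proc/net/tcp file; the 128-bit
 * addresses are printed as four 32-bit hex words in network byte order.
 */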
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait6_sock(seq, v, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq6(seq, v, st->num);
	else
		get_tcp6_sock(seq, v, st->num);
out:
	return 0;
}
static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.clear_sk		= tcp_v6_clear_sk,
	.diag_destroy		= tcp_abort,
};
static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}