/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static void	__tcp_v6_send_check(struct sk_buff *skb,
				    const struct in6_addr *saddr,
				    const struct in6_addr *daddr);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}

static __inline__ __sum16 tcp_v6_check(int len,
				       const struct in6_addr *saddr,
				       const struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			usin->sin6_addr = flowlabel->dst;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If an interface was set while binding, the
			 * indices must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	np->daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = np->daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		np->rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
		struct inet_peer *peer = rt6_get_peer(rt);
		/*
		 * VJ's idea. We save the last timestamp seen from
		 * the destination in the peer table, when entering state
		 * TIME-WAIT, and initialize rx_opt.ts_recent from it
		 * when trying a new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

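/*
 * Example (illustrative only, not part of this file): the userspace
 * connect() that lands in tcp_v6_connect() above. Note that a v4-mapped
 * destination (::ffff:a.b.c.d) is redirected to tcp_v4_connect() and the
 * socket is switched over to the ipv6_mapped ops.
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *	struct sockaddr_in6 dst = { .sin6_family = AF_INET6,
 *				    .sin6_port   = htons(80) };
 *	inet_pton(AF_INET6, "2001:db8::1", &dst.sin6_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 */
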
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi6 fl6;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle the rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl6, 0, sizeof(fl6));
			fl6.flowi6_proto = IPPROTO_TCP;
			fl6.daddr = np->daddr;
			fl6.saddr = np->saddr;
			fl6.flowi6_oif = sk->sk_bound_dev_if;
			fl6.flowi6_mark = sk->sk_mark;
			fl6.fl6_dport = inet->inet_dport;
			fl6.fl6_sport = inet->inet_sport;
			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
			if (IS_ERR(dst)) {
				sk->sk_err_soft = -PTR_ERR(dst);
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

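/*
 * Sketch of the PKT_TOOBIG path above: an ICMPv6 Packet Too Big for an
 * established connection lowers the path MTU; if our cached pmtu cookie
 * is now too large we shrink the MSS via tcp_sync_mss() and immediately
 * resend what no longer fits with tcp_simple_retransmit(). All other
 * ICMPv6 errors are converted with icmpv6_err_convert() and either
 * reported to the user (sk_err) or recorded softly (sk_err_soft) when
 * the socket is currently owned by userspace.
 */
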
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct request_values *rvp)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int err;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = treq->rmt_addr;
	fl6.saddr = treq->loc_addr;
	fl6.flowlabel = 0;
	fl6.flowi6_oif = treq->iif;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = inet_rsk(req)->rmt_port;
	fl6.fl6_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));

	opt = np->opt;
	final_p = fl6_update_dst(&fl6, opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}
	skb = tcp_make_synack(sk, dst, req, rvp);
	err = -ENOMEM;
	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		fl6.daddr = treq->rmt_addr;
		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, req, rvp);
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

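/*
 * Example (illustrative only): userspace arming TCP MD5 (RFC 2385) on an
 * IPv6 socket, which is what lands in tcp_v6_parse_md5_keys() above. A
 * v4-mapped peer address stores the key under AF_INET instead.
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::2", &a->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */
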
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

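/*
 * For reference, the tcp6_pseudohdr hashed above mirrors the RFC 2460
 * section 8.1 pseudo-header used for the TCP checksum (sketch; the real
 * definition lives in <net/tcp.h>):
 *
 *	struct tcp6_pseudohdr {
 *		struct in6_addr	saddr;
 *		struct in6_addr	daddr;
 *		__be32		len;		// upper-layer packet length
 *		__be32		protocol;	// IPPROTO_TCP, incl. padding
 *	};
 */
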
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return 1;
	}
	return 0;
}
#endif

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif

static void __tcp_v6_send_check(struct sk_buff *skb,
				const struct in6_addr *saddr, const struct in6_addr *daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v6_check(skb->len, saddr, daddr,
					 csum_partial(th, th->doff << 2,
						      skb->csum));
	}
}

static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
}

static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	return 0;
}

static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp6_gro_complete(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}

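/*
 * Note on the GRO pair above: tcp6_gro_receive() only hands segments to
 * tcp_gro_receive() once the checksum is known good (CHECKSUM_COMPLETE
 * validated here against the pseudo-header, or already marked
 * CHECKSUM_UNNECESSARY by the driver); CHECKSUM_NONE, or a failed
 * validation, sets NAPI_GRO_CB(skb)->flush and bails to the normal
 * receive path. tcp6_gro_complete() then rebuilds th->check as a
 * pseudo-header-only value so the merged super-segment can later be
 * resegmented by GSO/TSO as SKB_GSO_TCPV6.
 */
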
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.flowi6_oif = inet6_iif(skb);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even if it is for an RST.
	 * The underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

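/*
 * tcp_v6_send_response() builds a bare, socket-less TCP segment (RST or
 * ACK) on the per-netns control socket: header fields are swapped from
 * the segment that triggered it, optional TCPOPT_TIMESTAMP and
 * TCPOPT_MD5SIG words are appended after the header, and the result is
 * routed and sent with ip6_xmit(). tcp_v6_send_reset() and
 * tcp_v6_send_ack() below are thin wrappers around it.
 */
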
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose security here:
		 * the incoming packet is checked against the md5 hash with
		 * the found key; no RST is generated if the hash doesn't
		 * match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, &ipv6h->daddr,
					   ntohs(th->source), inet6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key, u8 tclass)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
			req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
}

static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
	bool want_cookie = false;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
		if (!want_cookie)
			goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = false;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	treq->rmt_addr = ipv6_hdr(skb)->saddr;
	treq->loc_addr = ipv6_hdr(skb)->daddr;
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb);

	treq->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		treq->iif = inet6_iif(skb);

	if (!isn) {
		struct inet_peer *peer = NULL;

		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save the last timestamp seen
		 * from the destination in the peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting a new connection request.
		 *
		 * If "isn" is not zero, this request hit an alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
		    ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
				    &treq->rmt_addr)) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies the last quarter of the
			 * backlog is filled with destinations
			 * proven to be alive.
			 * It means that we continue to communicate
			 * with destinations already remembered
			 * at the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}

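/*
 * Summary of the listen-path decisions above: a full SYN backlog first
 * consults tcp_syn_flood_action() (sysctl_tcp_syncookies) and either
 * drops the SYN or answers it statelessly with an ISN cooked by
 * cookie_v6_init_sequence(); with tw_recycle, timestamps remembered in
 * the inet_peer cache can reject a suspicious request outright
 * (LINUX_MIB_PAWSPASSIVEREJECTED). Only then is the request queued and
 * a SYN-ACK emitted via tcp_v6_send_synack().
 */
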
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newnp->rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_tclass  = ipv6_tclass(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newnp->daddr = treq->rmt_addr;
	newnp->saddr = treq->loc_addr;
	newnp->rcv_saddr = treq->loc_addr;
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		consume_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
			       AF_INET6, key->key, key->keylen, GFP_ATOMIC);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		sock_put(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

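/*
 * tcp_v6_checksum_init() resolves hardware checksum state before TCP
 * sees the segment: a CHECKSUM_COMPLETE value from the NIC is folded
 * against the pseudo-header and promoted to CHECKSUM_UNNECESSARY when it
 * matches; otherwise the pseudo-header sum is seeded into skb->csum and
 * short packets (<= 76 bytes) are verified immediately, while longer
 * ones are left for tcp_checksum_complete() to finish lazily on demand.
 */
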
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxtclass)
			np->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

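/*
 * Example (illustrative only): the options latched on the ipv6_pktoptions
 * path above are what a process asks for with the IPV6_RECVPKTINFO /
 * IPV6_RECVHOPLIMIT family of socket options, e.g.:
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVHOPLIMIT, &on, sizeof(on));
 *
 * For TCP, the skb stashed in np->pktoptions only ever reflects the most
 * recently received segment, as the long comment above explains.
 */
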
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	/*
	 *	Discard frame
	 */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
{
	struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_peer *peer;

	if (!rt ||
	    !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
		peer = inet_getpeer_v6(&np->daddr, 1);
		*release_it = true;
	} else {
		if (!rt->rt6i_peer)
			rt6_bind_peer(rt, 1);
		peer = rt->rt6i_peer;
		*release_it = false;
	}

	return peer;
}

static void *tcp_v6_tw_get_peer(struct sock *sk)
{
	const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
	const struct inet_timewait_sock *tw = inet_twsk(sk);

	if (tw->tw_family == AF_INET)
		return tcp_v4_tw_get_peer(sk);

	return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
	.twsk_getpeer	= tcp_v6_tw_get_peer,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.get_peer	   = tcp_v6_get_peer,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */

static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.get_peer	   = tcp_v4_get_peer,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc() so they need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};

static const struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check	=	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.gro_receive	=	tcp6_gro_receive,
	.gro_complete	=	tcp6_gro_complete,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

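/*
 * Note: the error labels in tcpv6_init() unwind in reverse registration
 * order, so a register_pernet_subsys() failure first drops the protosw
 * entry and then the inet6 protocol handler; tcpv6_exit() below tears
 * the three registrations down the same way.
 */
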
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}