 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static void	__tcp_v6_send_check(struct sk_buff *skb,
				    const struct in6_addr *saddr,
				    const struct in6_addr *daddr);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
97 static void tcp_v6_hash(struct sock *sk)
99 if (sk->sk_state != TCP_CLOSE) {
100 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
105 __inet6_hash(sk, NULL);
110 static __inline__ __sum16 tcp_v6_check(int len,
111 const struct in6_addr *saddr,
112 const struct in6_addr *daddr,
115 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
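/* Derive an initial sequence number from the 4-tuple of the incoming
 * segment via secure_tcpv6_sequence_number(), so ISNs are hard to
 * predict off-path.  Used on the passive-open path in
 * tcp_v6_conn_request().
 */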
118 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
120 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
121 ipv6_hdr(skb)->saddr.s6_addr32,
123 tcp_hdr(skb)->source);
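/* Active open: validate the sockaddr, resolve any flow label, fall back to
 * tcp_v4_connect() for v4-mapped destinations, look up a route, pick an
 * initial sequence number and finally hand off to tcp_connect().
 */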
126 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
129 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
130 struct inet_sock *inet = inet_sk(sk);
131 struct inet_connection_sock *icsk = inet_csk(sk);
132 struct ipv6_pinfo *np = inet6_sk(sk);
133 struct tcp_sock *tp = tcp_sk(sk);
134 struct in6_addr *saddr = NULL, *final_p, final;
137 struct dst_entry *dst;
141 if (addr_len < SIN6_LEN_RFC2133)
144 if (usin->sin6_family != AF_INET6)
145 return -EAFNOSUPPORT;
147 memset(&fl6, 0, sizeof(fl6));
150 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
151 IP6_ECN_flow_init(fl6.flowlabel);
152 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
153 struct ip6_flowlabel *flowlabel;
154 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
155 if (flowlabel == NULL)
157 usin->sin6_addr = flowlabel->dst;
158 fl6_sock_release(flowlabel);
	/* connect() to INADDR_ANY means loopback (BSD'ism). */
166 if(ipv6_addr_any(&usin->sin6_addr))
167 usin->sin6_addr.s6_addr[15] = 0x1;
169 addr_type = ipv6_addr_type(&usin->sin6_addr);
171 if(addr_type & IPV6_ADDR_MULTICAST)
174 if (addr_type&IPV6_ADDR_LINKLOCAL) {
175 if (addr_len >= sizeof(struct sockaddr_in6) &&
176 usin->sin6_scope_id) {
/* If interface is set while binding, indices
 * must coincide.
 */
180 if (sk->sk_bound_dev_if &&
181 sk->sk_bound_dev_if != usin->sin6_scope_id)
184 sk->sk_bound_dev_if = usin->sin6_scope_id;
187 /* Connect to link-local address requires an interface */
188 if (!sk->sk_bound_dev_if)
192 if (tp->rx_opt.ts_recent_stamp &&
193 !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
194 tp->rx_opt.ts_recent = 0;
195 tp->rx_opt.ts_recent_stamp = 0;
199 np->daddr = usin->sin6_addr;
200 np->flow_label = fl6.flowlabel;
206 if (addr_type == IPV6_ADDR_MAPPED) {
207 u32 exthdrlen = icsk->icsk_ext_hdr_len;
208 struct sockaddr_in sin;
210 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
212 if (__ipv6_only_sock(sk))
215 sin.sin_family = AF_INET;
216 sin.sin_port = usin->sin6_port;
217 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
219 icsk->icsk_af_ops = &ipv6_mapped;
220 sk->sk_backlog_rcv = tcp_v4_do_rcv;
221 #ifdef CONFIG_TCP_MD5SIG
222 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
225 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
228 icsk->icsk_ext_hdr_len = exthdrlen;
229 icsk->icsk_af_ops = &ipv6_specific;
230 sk->sk_backlog_rcv = tcp_v6_do_rcv;
231 #ifdef CONFIG_TCP_MD5SIG
232 tp->af_specific = &tcp_sock_ipv6_specific;
236 ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
237 ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
244 if (!ipv6_addr_any(&np->rcv_saddr))
245 saddr = &np->rcv_saddr;
247 fl6.flowi6_proto = IPPROTO_TCP;
248 fl6.daddr = np->daddr;
249 fl6.saddr = saddr ? *saddr : np->saddr;
250 fl6.flowi6_oif = sk->sk_bound_dev_if;
251 fl6.flowi6_mark = sk->sk_mark;
252 fl6.fl6_dport = usin->sin6_port;
253 fl6.fl6_sport = inet->inet_sport;
255 final_p = fl6_update_dst(&fl6, np->opt, &final);
257 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
259 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
267 np->rcv_saddr = *saddr;
270 /* set the source address */
272 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
274 sk->sk_gso_type = SKB_GSO_TCPV6;
275 __ip6_dst_store(sk, dst, NULL, NULL);
277 rt = (struct rt6_info *) dst;
278 if (tcp_death_row.sysctl_tw_recycle &&
279 !tp->rx_opt.ts_recent_stamp &&
280 ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
281 struct inet_peer *peer = rt6_get_peer(rt);
 * VJ's idea. We save the last timestamp seen from
 * the destination in the peer table, when entering state
 * TIME-WAIT, and initialize rx_opt.ts_recent from it
 * when trying a new connection.
289 inet_peer_refcheck(peer);
290 if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
291 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
292 tp->rx_opt.ts_recent = peer->tcp_ts;
297 icsk->icsk_ext_hdr_len = 0;
299 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
302 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
304 inet->inet_dport = usin->sin6_port;
306 tcp_set_state(sk, TCP_SYN_SENT);
307 err = inet6_hash_connect(&tcp_death_row, sk);
312 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
317 err = tcp_connect(sk);
324 tcp_set_state(sk, TCP_CLOSE);
327 inet->inet_dport = 0;
328 sk->sk_route_caps = 0;
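/* ICMPv6 error handler.  Locate the socket the error refers to and either
 * adjust the path MTU (ICMPV6_PKT_TOOBIG) or report the translated error to
 * the socket owner, taking care not to disturb state while the socket is
 * owned by user context.
 */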
332 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
333 u8 type, u8 code, int offset, __be32 info)
335 const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
336 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
337 struct ipv6_pinfo *np;
342 struct net *net = dev_net(skb->dev);
344 sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
345 th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
348 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
353 if (sk->sk_state == TCP_TIME_WAIT) {
354 inet_twsk_put(inet_twsk(sk));
359 if (sock_owned_by_user(sk))
360 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
362 if (sk->sk_state == TCP_CLOSE)
365 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
366 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
371 seq = ntohl(th->seq);
372 if (sk->sk_state != TCP_LISTEN &&
373 !between(seq, tp->snd_una, tp->snd_nxt)) {
374 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
380 if (type == ICMPV6_PKT_TOOBIG) {
381 struct dst_entry *dst;
383 if (sock_owned_by_user(sk))
385 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
388 /* icmp should have updated the destination cache entry */
389 dst = __sk_dst_check(sk, np->dst_cookie);
392 struct inet_sock *inet = inet_sk(sk);
/* BUGGG_FUTURE: Again, it is not clear how
   to handle the rthdr case. Ignore this complexity
   for now.
 */
399 memset(&fl6, 0, sizeof(fl6));
400 fl6.flowi6_proto = IPPROTO_TCP;
401 fl6.daddr = np->daddr;
402 fl6.saddr = np->saddr;
403 fl6.flowi6_oif = sk->sk_bound_dev_if;
404 fl6.flowi6_mark = sk->sk_mark;
405 fl6.fl6_dport = inet->inet_dport;
406 fl6.fl6_sport = inet->inet_sport;
407 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
409 dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
411 sk->sk_err_soft = -PTR_ERR(dst);
418 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
419 tcp_sync_mss(sk, dst_mtu(dst));
420 tcp_simple_retransmit(sk);
421 } /* else let the usual retransmit timer handle it */
426 icmpv6_err_convert(type, code, &err);
/* Might be for a request_sock */
429 switch (sk->sk_state) {
430 struct request_sock *req, **prev;
432 if (sock_owned_by_user(sk))
435 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
436 &hdr->saddr, inet6_iif(skb));
/* ICMPs are not backlogged, hence we cannot get
 * an established socket here.
 */
443 WARN_ON(req->sk != NULL);
445 if (seq != tcp_rsk(req)->snt_isn) {
446 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
450 inet_csk_reqsk_queue_drop(sk, req, prev);
case TCP_SYN_RECV:  /* Cannot happen.
		       It can, if SYNs are crossed. --ANK */
456 if (!sock_owned_by_user(sk)) {
458 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
462 sk->sk_err_soft = err;
466 if (!sock_owned_by_user(sk) && np->recverr) {
468 sk->sk_error_report(sk);
470 sk->sk_err_soft = err;
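/* Build and transmit a SYN-ACK for a pending request sock: route towards the
 * peer recorded in the request, let tcp_make_synack() construct the segment,
 * fill in the checksum and push it out with ip6_xmit().
 */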
478 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
479 struct request_values *rvp)
481 struct inet6_request_sock *treq = inet6_rsk(req);
482 struct ipv6_pinfo *np = inet6_sk(sk);
483 struct sk_buff * skb;
484 struct ipv6_txoptions *opt = NULL;
485 struct in6_addr * final_p, final;
487 struct dst_entry *dst;
490 memset(&fl6, 0, sizeof(fl6));
491 fl6.flowi6_proto = IPPROTO_TCP;
492 fl6.daddr = treq->rmt_addr;
493 fl6.saddr = treq->loc_addr;
495 fl6.flowi6_oif = treq->iif;
496 fl6.flowi6_mark = sk->sk_mark;
497 fl6.fl6_dport = inet_rsk(req)->rmt_port;
498 fl6.fl6_sport = inet_rsk(req)->loc_port;
499 security_req_classify_flow(req, flowi6_to_flowi(&fl6));
502 final_p = fl6_update_dst(&fl6, opt, &final);
504 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
510 skb = tcp_make_synack(sk, dst, req, rvp);
513 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
515 fl6.daddr = treq->rmt_addr;
516 err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
517 err = net_xmit_eval(err);
521 if (opt && opt != np->opt)
522 sock_kfree_s(sk, opt, opt->tot_len);
527 static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
528 struct request_values *rvp)
530 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
531 return tcp_v6_send_synack(sk, req, rvp);
534 static void tcp_v6_reqsk_destructor(struct request_sock *req)
536 kfree_skb(inet6_rsk(req)->pktopts);
539 #ifdef CONFIG_TCP_MD5SIG
540 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
541 const struct in6_addr *addr)
543 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
546 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
547 struct sock *addr_sk)
549 return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
552 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
553 struct request_sock *req)
555 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
558 static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
561 struct tcp_md5sig cmd;
562 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
564 if (optlen < sizeof(cmd))
567 if (copy_from_user(&cmd, optval, sizeof(cmd)))
570 if (sin6->sin6_family != AF_INET6)
573 if (!cmd.tcpm_keylen) {
574 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
575 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
577 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
581 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
584 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
585 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
586 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
588 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
589 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
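/* Illustrative only (hypothetical userspace sketch, not part of this file):
 * installing a key for an IPv6 peer via the TCP_MD5SIG socket option that
 * tcp_v6_parse_md5_keys() above handles.
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */

/* tcp_v6_md5_hash_pseudoheader() folds the IPv6 pseudo-header (source and
 * destination addresses, upper-layer length, next header = TCP) into the MD5
 * state, as the TCP MD5 signature option (RFC 2385) requires.
 */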
592 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
593 const struct in6_addr *daddr,
594 const struct in6_addr *saddr, int nbytes)
596 struct tcp6_pseudohdr *bp;
597 struct scatterlist sg;
599 bp = &hp->md5_blk.ip6;
600 /* 1. TCP pseudo-header (RFC2460) */
603 bp->protocol = cpu_to_be32(IPPROTO_TCP);
604 bp->len = cpu_to_be32(nbytes);
606 sg_init_one(&sg, bp, sizeof(*bp));
607 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
610 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
611 const struct in6_addr *daddr, struct in6_addr *saddr,
612 const struct tcphdr *th)
614 struct tcp_md5sig_pool *hp;
615 struct hash_desc *desc;
617 hp = tcp_get_md5sig_pool();
619 goto clear_hash_noput;
620 desc = &hp->md5_desc;
622 if (crypto_hash_init(desc))
624 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
626 if (tcp_md5_hash_header(hp, th))
628 if (tcp_md5_hash_key(hp, key))
630 if (crypto_hash_final(desc, md5_hash))
633 tcp_put_md5sig_pool();
637 tcp_put_md5sig_pool();
639 memset(md5_hash, 0, 16);
643 static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
644 const struct sock *sk,
645 const struct request_sock *req,
646 const struct sk_buff *skb)
648 const struct in6_addr *saddr, *daddr;
649 struct tcp_md5sig_pool *hp;
650 struct hash_desc *desc;
651 const struct tcphdr *th = tcp_hdr(skb);
654 saddr = &inet6_sk(sk)->saddr;
655 daddr = &inet6_sk(sk)->daddr;
657 saddr = &inet6_rsk(req)->loc_addr;
658 daddr = &inet6_rsk(req)->rmt_addr;
660 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
661 saddr = &ip6h->saddr;
662 daddr = &ip6h->daddr;
665 hp = tcp_get_md5sig_pool();
667 goto clear_hash_noput;
668 desc = &hp->md5_desc;
670 if (crypto_hash_init(desc))
673 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
675 if (tcp_md5_hash_header(hp, th))
677 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
679 if (tcp_md5_hash_key(hp, key))
681 if (crypto_hash_final(desc, md5_hash))
684 tcp_put_md5sig_pool();
688 tcp_put_md5sig_pool();
690 memset(md5_hash, 0, 16);
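/* Verify the MD5 signature on an incoming segment.  The segment is rejected
 * if a key is configured but no option is present, if an option is present
 * but no key is configured, or if the recomputed digest does not match.
 */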
694 static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
696 const __u8 *hash_location = NULL;
697 struct tcp_md5sig_key *hash_expected;
698 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
699 const struct tcphdr *th = tcp_hdr(skb);
703 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
704 hash_location = tcp_parse_md5sig_option(th);
706 /* We've parsed the options - do we have a hash? */
707 if (!hash_expected && !hash_location)
710 if (hash_expected && !hash_location) {
711 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
715 if (!hash_expected && hash_location) {
716 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
720 /* check the signature */
721 genhash = tcp_v6_md5_hash_skb(newhash,
725 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
726 if (net_ratelimit()) {
727 printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
728 genhash ? "failed" : "mismatch",
729 &ip6h->saddr, ntohs(th->source),
730 &ip6h->daddr, ntohs(th->dest));
738 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
740 .obj_size = sizeof(struct tcp6_request_sock),
741 .rtx_syn_ack = tcp_v6_rtx_synack,
742 .send_ack = tcp_v6_reqsk_send_ack,
743 .destructor = tcp_v6_reqsk_destructor,
744 .send_reset = tcp_v6_send_reset,
745 .syn_ack_timeout = tcp_syn_ack_timeout,
748 #ifdef CONFIG_TCP_MD5SIG
749 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
750 .md5_lookup = tcp_v6_reqsk_md5_lookup,
751 .calc_md5_hash = tcp_v6_md5_hash_skb,
755 static void __tcp_v6_send_check(struct sk_buff *skb,
756 const struct in6_addr *saddr, const struct in6_addr *daddr)
758 struct tcphdr *th = tcp_hdr(skb);
760 if (skb->ip_summed == CHECKSUM_PARTIAL) {
761 th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
762 skb->csum_start = skb_transport_header(skb) - skb->head;
763 skb->csum_offset = offsetof(struct tcphdr, check);
765 th->check = tcp_v6_check(skb->len, saddr, daddr,
766 csum_partial(th, th->doff << 2,
771 static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
773 struct ipv6_pinfo *np = inet6_sk(sk);
775 __tcp_v6_send_check(skb, &np->saddr, &np->daddr);
778 static int tcp_v6_gso_send_check(struct sk_buff *skb)
780 const struct ipv6hdr *ipv6h;
783 if (!pskb_may_pull(skb, sizeof(*th)))
786 ipv6h = ipv6_hdr(skb);
790 skb->ip_summed = CHECKSUM_PARTIAL;
791 __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
795 static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
798 const struct ipv6hdr *iph = skb_gro_network_header(skb);
800 switch (skb->ip_summed) {
801 case CHECKSUM_COMPLETE:
802 if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
804 skb->ip_summed = CHECKSUM_UNNECESSARY;
810 NAPI_GRO_CB(skb)->flush = 1;
814 return tcp_gro_receive(head, skb);
817 static int tcp6_gro_complete(struct sk_buff *skb)
819 const struct ipv6hdr *iph = ipv6_hdr(skb);
820 struct tcphdr *th = tcp_hdr(skb);
822 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
823 &iph->saddr, &iph->daddr, 0);
824 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
826 return tcp_gro_complete(skb);
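/* Build a bare ACK or RST in reply to @skb without a full socket: allocate a
 * small buffer, swap the address/port pairs, optionally append timestamp and
 * MD5 options, and send it through the per-netns control socket.
 */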
829 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
830 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
832 const struct tcphdr *th = tcp_hdr(skb);
834 struct sk_buff *buff;
836 struct net *net = dev_net(skb_dst(skb)->dev);
837 struct sock *ctl_sk = net->ipv6.tcp_sk;
838 unsigned int tot_len = sizeof(struct tcphdr);
839 struct dst_entry *dst;
843 tot_len += TCPOLEN_TSTAMP_ALIGNED;
844 #ifdef CONFIG_TCP_MD5SIG
846 tot_len += TCPOLEN_MD5SIG_ALIGNED;
849 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
854 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
856 t1 = (struct tcphdr *) skb_push(buff, tot_len);
857 skb_reset_transport_header(buff);
859 /* Swap the send and the receive. */
860 memset(t1, 0, sizeof(*t1));
861 t1->dest = th->source;
862 t1->source = th->dest;
863 t1->doff = tot_len / 4;
864 t1->seq = htonl(seq);
865 t1->ack_seq = htonl(ack);
866 t1->ack = !rst || !th->ack;
868 t1->window = htons(win);
870 topt = (__be32 *)(t1 + 1);
873 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
874 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
875 *topt++ = htonl(tcp_time_stamp);
879 #ifdef CONFIG_TCP_MD5SIG
881 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
882 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
883 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
884 &ipv6_hdr(skb)->saddr,
885 &ipv6_hdr(skb)->daddr, t1);
889 memset(&fl6, 0, sizeof(fl6));
890 fl6.daddr = ipv6_hdr(skb)->saddr;
891 fl6.saddr = ipv6_hdr(skb)->daddr;
893 buff->ip_summed = CHECKSUM_PARTIAL;
896 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
898 fl6.flowi6_proto = IPPROTO_TCP;
899 fl6.flowi6_oif = inet6_iif(skb);
900 fl6.fl6_dport = t1->dest;
901 fl6.fl6_sport = t1->source;
902 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
/* Pass a socket to ip6_dst_lookup even when it is for a RST;
 * the underlying function will use it to retrieve the network
 * namespace.
 */
908 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
910 skb_dst_set(buff, dst);
911 ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
912 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
914 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
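/* Send a RST for a segment that has no usable socket.  Non-unicast
 * destinations never get a RST; sequence and ack numbers follow the RFC 793
 * rules, and with MD5 enabled the key may have to be found via the listening
 * socket when no socket was supplied.
 */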
921 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
923 const struct tcphdr *th = tcp_hdr(skb);
924 u32 seq = 0, ack_seq = 0;
925 struct tcp_md5sig_key *key = NULL;
926 #ifdef CONFIG_TCP_MD5SIG
927 const __u8 *hash_location = NULL;
928 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
929 unsigned char newhash[16];
931 struct sock *sk1 = NULL;
937 if (!ipv6_unicast_destination(skb))
940 #ifdef CONFIG_TCP_MD5SIG
941 hash_location = tcp_parse_md5sig_option(th);
if (!sk && hash_location) {
	/*
	 * The active side is lost. Try to find the listening socket through
	 * the source port, and then find the md5 key through the listening
	 * socket. We do not lose security here: the incoming packet is
	 * checked against the md5 hash of the found key, and no RST is
	 * generated if the hash doesn't match.
	 */
950 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
951 &tcp_hashinfo, &ipv6h->daddr,
952 ntohs(th->source), inet6_iif(skb));
957 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
961 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
962 if (genhash || memcmp(hash_location, newhash, 16) != 0)
965 key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
970 seq = ntohl(th->ack_seq);
972 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
975 tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);
977 #ifdef CONFIG_TCP_MD5SIG
986 static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
987 struct tcp_md5sig_key *key, u8 tclass)
989 tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
992 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
994 struct inet_timewait_sock *tw = inet_twsk(sk);
995 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
997 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
998 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
999 tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
1005 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
1006 struct request_sock *req)
1008 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
1009 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
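/* For a segment arriving on a listening socket, decide whether it belongs to
 * a pending request (SYN_RECV), to an established socket created in the
 * meantime, or to a syncookie, and return the socket that should process it.
 */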
1013 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1015 struct request_sock *req, **prev;
1016 const struct tcphdr *th = tcp_hdr(skb);
1019 /* Find possible connection requests. */
1020 req = inet6_csk_search_req(sk, &prev, th->source,
1021 &ipv6_hdr(skb)->saddr,
1022 &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1024 return tcp_check_req(sk, skb, req, prev);
1026 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
1027 &ipv6_hdr(skb)->saddr, th->source,
1028 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1031 if (nsk->sk_state != TCP_TIME_WAIT) {
1035 inet_twsk_put(inet_twsk(nsk));
1039 #ifdef CONFIG_SYN_COOKIES
1041 sk = cookie_v6_check(sk, skb);
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
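/* Passive open: queue a request sock for an incoming SYN.  Handles SYN-flood
 * protection (syncookies), TCP cookie transactions, PAWS checks against the
 * inet_peer cache, and finally sends the SYN-ACK.
 */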
1049 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1051 struct tcp_extend_values tmp_ext;
1052 struct tcp_options_received tmp_opt;
1053 const u8 *hash_location;
1054 struct request_sock *req;
1055 struct inet6_request_sock *treq;
1056 struct ipv6_pinfo *np = inet6_sk(sk);
1057 struct tcp_sock *tp = tcp_sk(sk);
1058 __u32 isn = TCP_SKB_CB(skb)->when;
1059 struct dst_entry *dst = NULL;
1060 int want_cookie = 0;
1062 if (skb->protocol == htons(ETH_P_IP))
1063 return tcp_v4_conn_request(sk, skb);
1065 if (!ipv6_unicast_destination(skb))
1068 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1069 want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
1074 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1077 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1081 #ifdef CONFIG_TCP_MD5SIG
1082 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1085 tcp_clear_options(&tmp_opt);
1086 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1087 tmp_opt.user_mss = tp->rx_opt.user_mss;
1088 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1090 if (tmp_opt.cookie_plus > 0 &&
1091 tmp_opt.saw_tstamp &&
1092 !tp->rx_opt.cookie_out_never &&
1093 (sysctl_tcp_cookie_size > 0 ||
1094 (tp->cookie_values != NULL &&
1095 tp->cookie_values->cookie_desired > 0))) {
1098 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1099 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1101 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1104 /* Secret recipe starts with IP addresses */
1105 d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
1110 d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
1116 /* plus variable length Initiator Cookie */
1119 *c++ ^= *hash_location++;
1121 want_cookie = 0; /* not our kind of cookie */
1122 tmp_ext.cookie_out_never = 0; /* false */
1123 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1124 } else if (!tp->rx_opt.cookie_in_always) {
1125 /* redundant indications, but ensure initialization. */
1126 tmp_ext.cookie_out_never = 1; /* true */
1127 tmp_ext.cookie_plus = 0;
1131 tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1133 if (want_cookie && !tmp_opt.saw_tstamp)
1134 tcp_clear_options(&tmp_opt);
1136 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1137 tcp_openreq_init(req, &tmp_opt, skb);
1139 treq = inet6_rsk(req);
1140 treq->rmt_addr = ipv6_hdr(skb)->saddr;
1141 treq->loc_addr = ipv6_hdr(skb)->daddr;
1142 if (!want_cookie || tmp_opt.tstamp_ok)
1143 TCP_ECN_create_request(req, tcp_hdr(skb));
1145 treq->iif = sk->sk_bound_dev_if;
1147 /* So that link locals have meaning */
1148 if (!sk->sk_bound_dev_if &&
1149 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1150 treq->iif = inet6_iif(skb);
1153 struct inet_peer *peer = NULL;
1155 if (ipv6_opt_accepted(sk, skb) ||
1156 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1157 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1158 atomic_inc(&skb->users);
1159 treq->pktopts = skb;
1163 isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1164 req->cookie_ts = tmp_opt.tstamp_ok;
/* VJ's idea. We save the last timestamp seen
 * from the destination in the peer table, when entering
 * state TIME-WAIT, and check against it before
 * accepting a new connection request.
 *
 * If "isn" is not zero, this request hit an alive
 * timewait bucket, so that all the necessary checks
 * are made in the function processing timewait state.
 */
1177 if (tmp_opt.saw_tstamp &&
1178 tcp_death_row.sysctl_tw_recycle &&
1179 (dst = inet6_csk_route_req(sk, req)) != NULL &&
1180 (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
1181 ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
1183 inet_peer_refcheck(peer);
1184 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1185 (s32)(peer->tcp_ts - req->ts_recent) >
1187 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1188 goto drop_and_release;
1191 /* Kill the following clause, if you dislike this way. */
1192 else if (!sysctl_tcp_syncookies &&
1193 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1194 (sysctl_max_syn_backlog >> 2)) &&
1195 (!peer || !peer->tcp_ts_stamp) &&
1196 (!dst || !dst_metric(dst, RTAX_RTT))) {
/* Without syncookies the last quarter of the
 * backlog is filled with destinations
 * proven to be alive.
 * It means that we continue to communicate
 * with destinations already remembered
 * at the moment of the synflood.
 */
1204 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
1205 &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
1206 goto drop_and_release;
1209 isn = tcp_v6_init_sequence(skb);
1212 tcp_rsk(req)->snt_isn = isn;
1213 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1215 security_inet_conn_request(sk, skb, req);
1217 if (tcp_v6_send_synack(sk, req,
1218 (struct request_values *)&tmp_ext) ||
1222 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1230 return 0; /* don't send reset */
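/* Create the child socket once the 3WHS completes.  The v4-mapped case is
 * delegated to tcp_v4_syn_recv_sock() and then patched up to look like an
 * IPv6 socket; the native case copies addresses, options and the MD5 key
 * from the request/listener onto the newly created sock.
 */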
1233 static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1234 struct request_sock *req,
1235 struct dst_entry *dst)
1237 struct inet6_request_sock *treq;
1238 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1239 struct tcp6_sock *newtcp6sk;
1240 struct inet_sock *newinet;
1241 struct tcp_sock *newtp;
1243 struct ipv6_txoptions *opt;
1244 #ifdef CONFIG_TCP_MD5SIG
1245 struct tcp_md5sig_key *key;
1248 if (skb->protocol == htons(ETH_P_IP)) {
1253 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1258 newtcp6sk = (struct tcp6_sock *)newsk;
1259 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1261 newinet = inet_sk(newsk);
1262 newnp = inet6_sk(newsk);
1263 newtp = tcp_sk(newsk);
1265 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1267 ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
1269 ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
1271 newnp->rcv_saddr = newnp->saddr;
1273 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1274 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1275 #ifdef CONFIG_TCP_MD5SIG
1276 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1279 newnp->ipv6_ac_list = NULL;
1280 newnp->ipv6_fl_list = NULL;
1281 newnp->pktoptions = NULL;
1283 newnp->mcast_oif = inet6_iif(skb);
1284 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1285 newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
/*
 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
 * here, tcp_create_openreq_child now does this for us, see the comment in
 * that function for the gory details. -acme
 */
/* This is a tricky place. Until this moment the IPv4 tcp
   worked with the IPv6 icsk.icsk_af_ops.
   Sync it now.
 */
1297 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1302 treq = inet6_rsk(req);
1305 if (sk_acceptq_is_full(sk))
1309 dst = inet6_csk_route_req(sk, req);
1314 newsk = tcp_create_openreq_child(sk, req, skb);
/*
 * No need to charge this sock to the relevant IPv6 refcnt debug socks
 * count here, tcp_create_openreq_child now does this for us, see the
 * comment in that function for the gory details. -acme
 */
1324 newsk->sk_gso_type = SKB_GSO_TCPV6;
1325 __ip6_dst_store(newsk, dst, NULL, NULL);
1327 newtcp6sk = (struct tcp6_sock *)newsk;
1328 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1330 newtp = tcp_sk(newsk);
1331 newinet = inet_sk(newsk);
1332 newnp = inet6_sk(newsk);
1334 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1336 newnp->daddr = treq->rmt_addr;
1337 newnp->saddr = treq->loc_addr;
1338 newnp->rcv_saddr = treq->loc_addr;
1339 newsk->sk_bound_dev_if = treq->iif;
/* Now IPv6 options...

   First: no IPv4 options.
 */
1345 newinet->inet_opt = NULL;
1346 newnp->ipv6_ac_list = NULL;
1347 newnp->ipv6_fl_list = NULL;
1350 newnp->rxopt.all = np->rxopt.all;
1352 /* Clone pktoptions received with SYN */
1353 newnp->pktoptions = NULL;
1354 if (treq->pktopts != NULL) {
1355 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1356 kfree_skb(treq->pktopts);
1357 treq->pktopts = NULL;
1358 if (newnp->pktoptions)
1359 skb_set_owner_r(newnp->pktoptions, newsk);
1362 newnp->mcast_oif = inet6_iif(skb);
1363 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1364 newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
/* Clone native IPv6 options from the listening socket (if any).

   Yes, keeping a reference count would be much more clever,
   but we do one more thing here: we reattach the optmem
   to newsk.
 */
1373 newnp->opt = ipv6_dup_options(newsk, opt);
1375 sock_kfree_s(sk, opt, opt->tot_len);
1378 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1380 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1381 newnp->opt->opt_flen);
1383 tcp_mtup_init(newsk);
1384 tcp_sync_mss(newsk, dst_mtu(dst));
1385 newtp->advmss = dst_metric_advmss(dst);
1386 if (tcp_sk(sk)->rx_opt.user_mss &&
1387 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1388 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1390 tcp_initialize_rcv_mss(newsk);
1391 if (tcp_rsk(req)->snt_synack)
1392 tcp_valid_rtt_meas(newsk,
1393 tcp_time_stamp - tcp_rsk(req)->snt_synack);
1394 newtp->total_retrans = req->retrans;
1396 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1397 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1399 #ifdef CONFIG_TCP_MD5SIG
1400 /* Copy over the MD5 key from the original socket */
1401 if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
/* We're using one, so create a matching key
 * on the newsk structure. If we fail to get
 * memory, then we end up not copying the key
 * across. Shucks.
 */
1407 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
1408 AF_INET6, key->key, key->keylen, GFP_ATOMIC);
1412 if (__inet_inherit_port(sk, newsk) < 0) {
1416 __inet6_hash(newsk, NULL);
1421 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1423 if (opt && opt != np->opt)
1424 sock_kfree_s(sk, opt, opt->tot_len);
1427 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
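/* Validate the TCP checksum on receive.  Hardware-verified packets
 * (CHECKSUM_COMPLETE) are accepted directly; short packets are checked in
 * full here, while longer ones only get the pseudo-header seed and are
 * completed later when the data is copied.
 */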
1431 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1433 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1434 if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
1435 &ipv6_hdr(skb)->daddr, skb->csum)) {
1436 skb->ip_summed = CHECKSUM_UNNECESSARY;
1441 skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
1442 &ipv6_hdr(skb)->saddr,
1443 &ipv6_hdr(skb)->daddr, 0));
1445 if (skb->len <= 76) {
1446 return __skb_checksum_complete(skb);
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
1459 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1461 struct ipv6_pinfo *np = inet6_sk(sk);
1462 struct tcp_sock *tp;
1463 struct sk_buff *opt_skb = NULL;
/* Imagine: the socket is IPv6. An IPv4 packet arrives,
   goes to the IPv4 receive handler and is backlogged.
   From the backlog it always comes here. Kerboom...
   Fortunately, tcp_rcv_established and rcv_established
   handle them correctly, but that is not the case with
   tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
 */
1473 if (skb->protocol == htons(ETH_P_IP))
1474 return tcp_v4_do_rcv(sk, skb);
1476 #ifdef CONFIG_TCP_MD5SIG
1477 if (tcp_v6_inbound_md5_hash (sk, skb))
1481 if (sk_filter(sk, skb))
/*
 *	socket locking is here for SMP purposes as backlog rcv
 *	is currently called with bh processing disabled.
 */
/* Do Stevens' IPV6_PKTOPTIONS.

   Yes, guys, it is the only place in our code where we
   may make it not affecting IPv4.
   The rest of the code is protocol independent,
   and I do not like the idea of uglifying IPv4.

   Actually, the whole idea behind IPV6_PKTOPTIONS
   looks not very well thought out. For now we latch
   options, received in the last packet, enqueued
   by tcp. Feel free to propose a better solution.
 */
1503 opt_skb = skb_clone(skb, GFP_ATOMIC);
1505 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1506 sock_rps_save_rxhash(sk, skb);
1507 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1510 goto ipv6_pktoptions;
1514 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1517 if (sk->sk_state == TCP_LISTEN) {
1518 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
/*
 * Queue it on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 */
1528 sock_rps_save_rxhash(nsk, skb);
1529 if (tcp_child_process(sk, nsk, skb))
1532 __kfree_skb(opt_skb);
1536 sock_rps_save_rxhash(sk, skb);
1538 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1541 goto ipv6_pktoptions;
1545 tcp_v6_send_reset(sk, skb);
1548 __kfree_skb(opt_skb);
1552 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
/* You may ask, what is this for?

   1. The skb was enqueued by tcp.
   2. The skb was added to the tail of the read queue, rather than out of order.
   3. The socket is not in a passive state.
   4. Finally, it really contains options which the user wants to receive.
 */
1565 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1566 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1567 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1568 np->mcast_oif = inet6_iif(opt_skb);
1569 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1570 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1571 if (np->rxopt.bits.rxtclass)
1572 np->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
1573 if (ipv6_opt_accepted(sk, opt_skb)) {
1574 skb_set_owner_r(opt_skb, sk);
1575 opt_skb = xchg(&np->pktoptions, opt_skb);
1577 __kfree_skb(opt_skb);
1578 opt_skb = xchg(&np->pktoptions, NULL);
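/* Main receive entry point from the IPv6 stack: validate the header and
 * checksum, look up the owning socket, apply XFRM policy and socket filters,
 * then process the segment directly, via the prequeue, or via the backlog,
 * depending on who currently owns the socket lock.
 */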
1586 static int tcp_v6_rcv(struct sk_buff *skb)
1588 const struct tcphdr *th;
1589 const struct ipv6hdr *hdr;
1592 struct net *net = dev_net(skb->dev);
1594 if (skb->pkt_type != PACKET_HOST)
/*
 *	Count it even if it's bad.
 */
1600 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1602 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1607 if (th->doff < sizeof(struct tcphdr)/4)
1609 if (!pskb_may_pull(skb, th->doff*4))
1612 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1616 hdr = ipv6_hdr(skb);
1617 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1618 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1619 skb->len - th->doff*4);
1620 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1621 TCP_SKB_CB(skb)->when = 0;
1622 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1623 TCP_SKB_CB(skb)->sacked = 0;
1625 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1630 if (sk->sk_state == TCP_TIME_WAIT)
1633 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1634 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1635 goto discard_and_relse;
1638 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1639 goto discard_and_relse;
1641 if (sk_filter(sk, skb))
1642 goto discard_and_relse;
1646 bh_lock_sock_nested(sk);
1648 if (!sock_owned_by_user(sk)) {
1649 #ifdef CONFIG_NET_DMA
1650 struct tcp_sock *tp = tcp_sk(sk);
1651 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1652 tp->ucopy.dma_chan = net_dma_find_channel();
1653 if (tp->ucopy.dma_chan)
1654 ret = tcp_v6_do_rcv(sk, skb);
1658 if (!tcp_prequeue(sk, skb))
1659 ret = tcp_v6_do_rcv(sk, skb);
1661 } else if (unlikely(sk_add_backlog(sk, skb))) {
1663 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1664 goto discard_and_relse;
1669 return ret ? -1 : 0;
1672 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1675 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1677 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1679 tcp_v6_send_reset(NULL, skb);
1696 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1697 inet_twsk_put(inet_twsk(sk));
1701 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1702 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1703 inet_twsk_put(inet_twsk(sk));
1707 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1712 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1713 &ipv6_hdr(skb)->daddr,
1714 ntohs(th->dest), inet6_iif(skb));
1716 struct inet_timewait_sock *tw = inet_twsk(sk);
1717 inet_twsk_deschedule(tw, &tcp_death_row);
1722 /* Fall through to ACK */
1725 tcp_v6_timewait_ack(sk, skb);
1729 case TCP_TW_SUCCESS:;
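/* Return the inet_peer entry for the connection's destination: if the cached
 * route still matches np->daddr the peer bound to the route is reused (and
 * the caller must not release it), otherwise a fresh reference is looked up.
 */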
1734 static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
1736 struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
1737 struct ipv6_pinfo *np = inet6_sk(sk);
1738 struct inet_peer *peer;
1741 !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
1742 peer = inet_getpeer_v6(&np->daddr, 1);
1746 rt6_bind_peer(rt, 1);
1747 peer = rt->rt6i_peer;
1748 *release_it = false;
1754 static void *tcp_v6_tw_get_peer(struct sock *sk)
1756 const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
1757 const struct inet_timewait_sock *tw = inet_twsk(sk);
1759 if (tw->tw_family == AF_INET)
1760 return tcp_v4_tw_get_peer(sk);
1762 return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
1765 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1766 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1767 .twsk_unique = tcp_twsk_unique,
1768 .twsk_destructor= tcp_twsk_destructor,
1769 .twsk_getpeer = tcp_v6_tw_get_peer,
1772 static const struct inet_connection_sock_af_ops ipv6_specific = {
1773 .queue_xmit = inet6_csk_xmit,
1774 .send_check = tcp_v6_send_check,
1775 .rebuild_header = inet6_sk_rebuild_header,
1776 .conn_request = tcp_v6_conn_request,
1777 .syn_recv_sock = tcp_v6_syn_recv_sock,
1778 .get_peer = tcp_v6_get_peer,
1779 .net_header_len = sizeof(struct ipv6hdr),
1780 .setsockopt = ipv6_setsockopt,
1781 .getsockopt = ipv6_getsockopt,
1782 .addr2sockaddr = inet6_csk_addr2sockaddr,
1783 .sockaddr_len = sizeof(struct sockaddr_in6),
1784 .bind_conflict = inet6_csk_bind_conflict,
1785 #ifdef CONFIG_COMPAT
1786 .compat_setsockopt = compat_ipv6_setsockopt,
1787 .compat_getsockopt = compat_ipv6_getsockopt,
1791 #ifdef CONFIG_TCP_MD5SIG
1792 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1793 .md5_lookup = tcp_v6_md5_lookup,
1794 .calc_md5_hash = tcp_v6_md5_hash_skb,
1795 .md5_parse = tcp_v6_parse_md5_keys,
1800 * TCP over IPv4 via INET6 API
1803 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1804 .queue_xmit = ip_queue_xmit,
1805 .send_check = tcp_v4_send_check,
1806 .rebuild_header = inet_sk_rebuild_header,
1807 .conn_request = tcp_v6_conn_request,
1808 .syn_recv_sock = tcp_v6_syn_recv_sock,
1809 .get_peer = tcp_v4_get_peer,
1810 .net_header_len = sizeof(struct iphdr),
1811 .setsockopt = ipv6_setsockopt,
1812 .getsockopt = ipv6_getsockopt,
1813 .addr2sockaddr = inet6_csk_addr2sockaddr,
1814 .sockaddr_len = sizeof(struct sockaddr_in6),
1815 .bind_conflict = inet6_csk_bind_conflict,
1816 #ifdef CONFIG_COMPAT
1817 .compat_setsockopt = compat_ipv6_setsockopt,
1818 .compat_getsockopt = compat_ipv6_getsockopt,
1822 #ifdef CONFIG_TCP_MD5SIG
1823 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1824 .md5_lookup = tcp_v4_md5_lookup,
1825 .calc_md5_hash = tcp_v4_md5_hash_skb,
1826 .md5_parse = tcp_v6_parse_md5_keys,
/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
1833 static int tcp_v6_init_sock(struct sock *sk)
1835 struct inet_connection_sock *icsk = inet_csk(sk);
1836 struct tcp_sock *tp = tcp_sk(sk);
1838 skb_queue_head_init(&tp->out_of_order_queue);
1839 tcp_init_xmit_timers(sk);
1840 tcp_prequeue_init(tp);
1842 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1843 tp->mdev = TCP_TIMEOUT_INIT;
/* So many TCP implementations out there (incorrectly) count the
 * initial SYN frame in their delayed-ACK and congestion control
 * algorithms that we must have the following bandaid to talk
 * efficiently to them.  -DaveM
 */
/* See draft-stevens-tcpca-spec-01 for discussion of the
 * initialization of these values.
 */
1855 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1856 tp->snd_cwnd_clamp = ~0;
1857 tp->mss_cache = TCP_MSS_DEFAULT;
1859 tp->reordering = sysctl_tcp_reordering;
1861 sk->sk_state = TCP_CLOSE;
1863 icsk->icsk_af_ops = &ipv6_specific;
1864 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1865 icsk->icsk_sync_mss = tcp_sync_mss;
1866 sk->sk_write_space = sk_stream_write_space;
1867 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1869 #ifdef CONFIG_TCP_MD5SIG
1870 tp->af_specific = &tcp_sock_ipv6_specific;
1873 /* TCP Cookie Transactions */
1874 if (sysctl_tcp_cookie_size > 0) {
1875 /* Default, cookies without s_data_payload. */
1877 kzalloc(sizeof(*tp->cookie_values),
1879 if (tp->cookie_values != NULL)
1880 kref_init(&tp->cookie_values->kref);
/* Presumed zeroed, in order of appearance:
 *	cookie_in_always, cookie_out_never,
 *	s_data_constant, s_data_in, s_data_out
 */
1886 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1887 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1890 sock_update_memcg(sk);
1891 sk_sockets_allocated_inc(sk);
1897 static void tcp_v6_destroy_sock(struct sock *sk)
1899 tcp_v4_destroy_sock(sk);
1900 inet6_destroy_sock(sk);
1903 #ifdef CONFIG_PROC_FS
1904 /* Proc filesystem TCPv6 sock list dumping. */
1905 static void get_openreq6(struct seq_file *seq,
1906 const struct sock *sk, struct request_sock *req, int i, int uid)
1908 int ttd = req->expires - jiffies;
1909 const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1910 const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1916 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1917 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1919 src->s6_addr32[0], src->s6_addr32[1],
1920 src->s6_addr32[2], src->s6_addr32[3],
1921 ntohs(inet_rsk(req)->loc_port),
1922 dest->s6_addr32[0], dest->s6_addr32[1],
1923 dest->s6_addr32[2], dest->s6_addr32[3],
1924 ntohs(inet_rsk(req)->rmt_port),
1926 0,0, /* could print option size, but that is af dependent. */
1927 1, /* timers active (only the expire timer) */
1928 jiffies_to_clock_t(ttd),
1931 0, /* non standard timer */
1932 0, /* open_requests have no inode */
1936 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1938 const struct in6_addr *dest, *src;
1941 unsigned long timer_expires;
1942 const struct inet_sock *inet = inet_sk(sp);
1943 const struct tcp_sock *tp = tcp_sk(sp);
1944 const struct inet_connection_sock *icsk = inet_csk(sp);
1945 const struct ipv6_pinfo *np = inet6_sk(sp);
1948 src = &np->rcv_saddr;
1949 destp = ntohs(inet->inet_dport);
1950 srcp = ntohs(inet->inet_sport);
1952 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1954 timer_expires = icsk->icsk_timeout;
1955 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1957 timer_expires = icsk->icsk_timeout;
1958 } else if (timer_pending(&sp->sk_timer)) {
1960 timer_expires = sp->sk_timer.expires;
1963 timer_expires = jiffies;
1967 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1968 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
1970 src->s6_addr32[0], src->s6_addr32[1],
1971 src->s6_addr32[2], src->s6_addr32[3], srcp,
1972 dest->s6_addr32[0], dest->s6_addr32[1],
1973 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1975 tp->write_seq-tp->snd_una,
1976 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1978 jiffies_to_clock_t(timer_expires - jiffies),
1979 icsk->icsk_retransmits,
1981 icsk->icsk_probes_out,
1983 atomic_read(&sp->sk_refcnt), sp,
1984 jiffies_to_clock_t(icsk->icsk_rto),
1985 jiffies_to_clock_t(icsk->icsk_ack.ato),
1986 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
1988 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
1992 static void get_timewait6_sock(struct seq_file *seq,
1993 struct inet_timewait_sock *tw, int i)
1995 const struct in6_addr *dest, *src;
1997 const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
1998 int ttd = tw->tw_ttd - jiffies;
2003 dest = &tw6->tw_v6_daddr;
2004 src = &tw6->tw_v6_rcv_saddr;
2005 destp = ntohs(tw->tw_dport);
2006 srcp = ntohs(tw->tw_sport);
2009 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2010 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2012 src->s6_addr32[0], src->s6_addr32[1],
2013 src->s6_addr32[2], src->s6_addr32[3], srcp,
2014 dest->s6_addr32[0], dest->s6_addr32[1],
2015 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2016 tw->tw_substate, 0, 0,
2017 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2018 atomic_read(&tw->tw_refcnt), tw);
2021 static int tcp6_seq_show(struct seq_file *seq, void *v)
2023 struct tcp_iter_state *st;
2025 if (v == SEQ_START_TOKEN) {
2030 "st tx_queue rx_queue tr tm->when retrnsmt"
2031 " uid timeout inode\n");
2036 switch (st->state) {
2037 case TCP_SEQ_STATE_LISTENING:
2038 case TCP_SEQ_STATE_ESTABLISHED:
2039 get_tcp6_sock(seq, v, st->num);
2041 case TCP_SEQ_STATE_OPENREQ:
2042 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2044 case TCP_SEQ_STATE_TIME_WAIT:
2045 get_timewait6_sock(seq, v, st->num);
2052 static const struct file_operations tcp6_afinfo_seq_fops = {
2053 .owner = THIS_MODULE,
2054 .open = tcp_seq_open,
2056 .llseek = seq_lseek,
2057 .release = seq_release_net
2060 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2063 .seq_fops = &tcp6_afinfo_seq_fops,
2065 .show = tcp6_seq_show,
2069 int __net_init tcp6_proc_init(struct net *net)
2071 return tcp_proc_register(net, &tcp6_seq_afinfo);
2074 void tcp6_proc_exit(struct net *net)
2076 tcp_proc_unregister(net, &tcp6_seq_afinfo);
2080 struct proto tcpv6_prot = {
2082 .owner = THIS_MODULE,
2084 .connect = tcp_v6_connect,
2085 .disconnect = tcp_disconnect,
2086 .accept = inet_csk_accept,
2088 .init = tcp_v6_init_sock,
2089 .destroy = tcp_v6_destroy_sock,
2090 .shutdown = tcp_shutdown,
2091 .setsockopt = tcp_setsockopt,
2092 .getsockopt = tcp_getsockopt,
2093 .recvmsg = tcp_recvmsg,
2094 .sendmsg = tcp_sendmsg,
2095 .sendpage = tcp_sendpage,
2096 .backlog_rcv = tcp_v6_do_rcv,
2097 .hash = tcp_v6_hash,
2098 .unhash = inet_unhash,
2099 .get_port = inet_csk_get_port,
2100 .enter_memory_pressure = tcp_enter_memory_pressure,
2101 .sockets_allocated = &tcp_sockets_allocated,
2102 .memory_allocated = &tcp_memory_allocated,
2103 .memory_pressure = &tcp_memory_pressure,
2104 .orphan_count = &tcp_orphan_count,
2105 .sysctl_wmem = sysctl_tcp_wmem,
2106 .sysctl_rmem = sysctl_tcp_rmem,
2107 .max_header = MAX_TCP_HEADER,
2108 .obj_size = sizeof(struct tcp6_sock),
2109 .slab_flags = SLAB_DESTROY_BY_RCU,
2110 .twsk_prot = &tcp6_timewait_sock_ops,
2111 .rsk_prot = &tcp6_request_sock_ops,
2112 .h.hashinfo = &tcp_hashinfo,
2113 .no_autobind = true,
2114 #ifdef CONFIG_COMPAT
2115 .compat_setsockopt = compat_tcp_setsockopt,
2116 .compat_getsockopt = compat_tcp_getsockopt,
2118 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
2119 .proto_cgroup = tcp_proto_cgroup,
2123 static const struct inet6_protocol tcpv6_protocol = {
2124 .handler = tcp_v6_rcv,
2125 .err_handler = tcp_v6_err,
2126 .gso_send_check = tcp_v6_gso_send_check,
2127 .gso_segment = tcp_tso_segment,
2128 .gro_receive = tcp6_gro_receive,
2129 .gro_complete = tcp6_gro_complete,
2130 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2133 static struct inet_protosw tcpv6_protosw = {
2134 .type = SOCK_STREAM,
2135 .protocol = IPPROTO_TCP,
2136 .prot = &tcpv6_prot,
2137 .ops = &inet6_stream_ops,
2139 .flags = INET_PROTOSW_PERMANENT |
2143 static int __net_init tcpv6_net_init(struct net *net)
2145 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2146 SOCK_RAW, IPPROTO_TCP, net);
2149 static void __net_exit tcpv6_net_exit(struct net *net)
2151 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2154 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2156 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
2159 static struct pernet_operations tcpv6_net_ops = {
2160 .init = tcpv6_net_init,
2161 .exit = tcpv6_net_exit,
2162 .exit_batch = tcpv6_net_exit_batch,
2165 int __init tcpv6_init(void)
2169 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2173 /* register inet6 protocol */
2174 ret = inet6_register_protosw(&tcpv6_protosw);
2176 goto out_tcpv6_protocol;
2178 ret = register_pernet_subsys(&tcpv6_net_ops);
2180 goto out_tcpv6_protosw;
2185 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2187 inet6_unregister_protosw(&tcpv6_protosw);
2191 void tcpv6_exit(void)
2193 unregister_pernet_subsys(&tcpv6_net_ops);
2194 inet6_unregister_protosw(&tcpv6_protosw);
2195 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);