2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * IPv4 specific functions
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
16 * See tcp.c for author information
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
26 * David S. Miller : New socket lookup architecture.
27 * This code is dedicated to John Dyson.
28 * David S. Miller : Change semantics of established hash,
29 * half is devoted to TIME_WAIT sockets
30 * and the rest go in the other half.
31 * Andi Kleen : Add support for syncookies and fixed
32 * some bugs: ip options weren't passed to
33 * the TCP layer, missed a check for an ACK bit.
35 * Andi Kleen : Implemented fast path mtu discovery.
36 * Fixed many serious bugs in the
37 * request_sock handling and moved
38 * most of it into the af independent code.
39 * Added tail drop and some other bugfixes.
40 * Added new listen semantics.
41 * Mike McLagan : Routing by source
42 * Juan Jose Ciarlante: ip_dynaddr bits
43 * Andi Kleen: various fixes.
44 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
46 * Andi Kleen : Fix new listen.
47 * Andi Kleen : Fix accept error reporting.
48 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
49 * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
50 * a single port at the same time.
53 #define pr_fmt(fmt) "TCP: " fmt
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
66 #include <net/net_namespace.h>
68 #include <net/inet_hashtables.h>
70 #include <net/transp_v6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
75 #include <net/netdma.h>
76 #include <net/secure_seq.h>
77 #include <net/tcp_memcontrol.h>
79 #include <linux/inet.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
85 #include <linux/crypto.h>
86 #include <linux/scatterlist.h>
88 int sysctl_tcp_tw_reuse __read_mostly;
89 int sysctl_tcp_low_latency __read_mostly;
90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
93 #ifdef CONFIG_TCP_MD5SIG
94 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
95 __be32 daddr, __be32 saddr, const struct tcphdr *th);
98 struct inet_hashinfo tcp_hashinfo;
99 EXPORT_SYMBOL(tcp_hashinfo);
101 static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
103 return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
104 ip_hdr(skb)->saddr,
105 tcp_hdr(skb)->dest,
106 tcp_hdr(skb)->source);
109 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
111 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
112 struct tcp_sock *tp = tcp_sk(sk);
114 /* With PAWS, it is safe from the viewpoint
115 of data integrity. Even without PAWS it is safe provided sequence
116 spaces do not overlap i.e. at data rates <= 80Mbit/sec.
118 Actually, the idea is close to VJ's one, only timestamp cache is
119 held not per host, but per port pair and TW bucket is used as state
120 holder.
122 If TW bucket has been already destroyed we fall back to VJ's scheme
123 and use initial timestamp retrieved from peer table.
125 if (tcptw->tw_ts_recent_stamp &&
126 (twp == NULL || (sysctl_tcp_tw_reuse &&
127 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
128 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
129 if (tp->write_seq == 0)
130 tp->write_seq = 1;
131 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
132 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
139 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
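/* Illustrative sketch (not part of the original file): the reuse path
 * above picks the new connection's initial write_seq by jumping past the
 * TIME_WAIT socket's snd_nxt by more than one maximum unscaled window
 * (65535 + 2), so the two incarnations' sequence spaces cannot overlap.
 * A minimal stand-alone rendering of that arithmetic, assuming plain
 * u32 wraparound:
 */
static inline u32 tw_reuse_isn_sketch(u32 tw_snd_nxt)
{
	u32 isn = tw_snd_nxt + 65535 + 2;

	return isn ?: 1;	/* write_seq == 0 means "unset"; avoid it */
}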
141 /* This will initiate an outgoing connection. */
142 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
144 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
145 struct inet_sock *inet = inet_sk(sk);
146 struct tcp_sock *tp = tcp_sk(sk);
147 __be16 orig_sport, orig_dport;
148 __be32 daddr, nexthop;
152 struct ip_options_rcu *inet_opt;
154 if (addr_len < sizeof(struct sockaddr_in))
157 if (usin->sin_family != AF_INET)
158 return -EAFNOSUPPORT;
160 nexthop = daddr = usin->sin_addr.s_addr;
161 inet_opt = rcu_dereference_protected(inet->inet_opt,
162 sock_owned_by_user(sk));
163 if (inet_opt && inet_opt->opt.srr) {
166 nexthop = inet_opt->opt.faddr;
169 orig_sport = inet->inet_sport;
170 orig_dport = usin->sin_port;
171 fl4 = &inet->cork.fl.u.ip4;
172 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
173 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
175 orig_sport, orig_dport, sk, true);
178 if (err == -ENETUNREACH)
179 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
183 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
188 if (!inet_opt || !inet_opt->opt.srr)
191 if (!inet->inet_saddr)
192 inet->inet_saddr = fl4->saddr;
193 inet->inet_rcv_saddr = inet->inet_saddr;
195 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
196 /* Reset inherited state */
197 tp->rx_opt.ts_recent = 0;
198 tp->rx_opt.ts_recent_stamp = 0;
199 if (likely(!tp->repair))
203 if (tcp_death_row.sysctl_tw_recycle &&
204 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
205 tcp_fetch_timewait_stamp(sk, &rt->dst);
207 inet->inet_dport = usin->sin_port;
208 inet->inet_daddr = daddr;
210 inet_csk(sk)->icsk_ext_hdr_len = 0;
212 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
214 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
216 /* Socket identity is still unknown (sport may be zero).
217 * However we set state to SYN-SENT and, without releasing the socket
218 * lock, select a source port, enter ourselves into the hash tables and
219 * complete initialization after this.
221 tcp_set_state(sk, TCP_SYN_SENT);
222 err = inet_hash_connect(&tcp_death_row, sk);
226 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
227 inet->inet_sport, inet->inet_dport, sk);
233 /* OK, now commit destination to socket. */
234 sk->sk_gso_type = SKB_GSO_TCPV4;
235 sk_setup_caps(sk, &rt->dst);
237 if (!tp->write_seq && likely(!tp->repair))
238 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
239 inet->inet_daddr,
240 inet->inet_sport,
241 usin->sin_port);
243 inet->inet_id = tp->write_seq ^ jiffies;
245 err = tcp_connect(sk);
255 * This unhashes the socket and releases the local port,
256 * if necessary.
258 tcp_set_state(sk, TCP_CLOSE);
260 sk->sk_route_caps = 0;
261 inet->inet_dport = 0;
264 EXPORT_SYMBOL(tcp_v4_connect);
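/* Illustrative sketch (not part of the original file): tcp_v4_connect()
 * is where a plain userspace connect(2) on an AF_INET stream socket ends
 * up; errors such as EAFNOSUPPORT and ENETUNREACH above surface there as
 * errno. Compiled out here; the address is a documentation-only example.
 */
#if 0
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int connect_example(void)
{
	struct sockaddr_in sin;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(80);
	inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
	if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0)
		perror("connect");	/* e.g. ENETUNREACH */
	close(fd);
	return 0;
}
#endif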
267 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
268 * It can be called through tcp_release_cb() if socket was owned by user
269 * at the time tcp_v4_err() was called to handle ICMP message.
271 void tcp_v4_mtu_reduced(struct sock *sk)
273 struct dst_entry *dst;
274 struct inet_sock *inet = inet_sk(sk);
275 u32 mtu = tcp_sk(sk)->mtu_info;
277 dst = inet_csk_update_pmtu(sk, mtu);
281 /* Something is about to go wrong... Remember the soft error
282 * for the case that this connection will not be able to recover.
284 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
285 sk->sk_err_soft = EMSGSIZE;
289 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
290 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
291 tcp_sync_mss(sk, mtu);
293 /* Resend the TCP packet because it's
294 * clear that the old packet has been
295 * dropped. This is the new "fast" path mtu
296 * discovery.
298 tcp_simple_retransmit(sk);
299 } /* else let the usual retransmit timer handle it */
301 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
303 static void do_redirect(struct sk_buff *skb, struct sock *sk)
305 struct dst_entry *dst = __sk_dst_check(sk, 0);
308 dst->ops->redirect(dst, sk, skb);
312 * This routine is called by the ICMP module when it gets some
313 * sort of error condition. If err < 0 then the socket should
314 * be closed and the error returned to the user. If err > 0
315 * it's just the icmp type << 8 | icmp code. After adjustment
316 * header points to the first 8 bytes of the tcp header. We need
317 * to find the appropriate port.
319 * The locking strategy used here is very "optimistic". When
320 * someone else accesses the socket the ICMP is just dropped
321 * and for some paths there is no check at all.
322 * A more general error queue to queue errors for later handling
323 * is probably better.
327 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
329 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
330 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
331 struct inet_connection_sock *icsk;
333 struct inet_sock *inet;
334 const int type = icmp_hdr(icmp_skb)->type;
335 const int code = icmp_hdr(icmp_skb)->code;
338 struct request_sock *req;
342 struct net *net = dev_net(icmp_skb->dev);
344 if (icmp_skb->len < (iph->ihl << 2) + 8) {
345 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
349 sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
350 iph->saddr, th->source, inet_iif(icmp_skb));
352 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
355 if (sk->sk_state == TCP_TIME_WAIT) {
356 inet_twsk_put(inet_twsk(sk));
361 /* If too many ICMPs get dropped on busy
362 * servers this needs to be solved differently.
363 * We do take care of PMTU discovery (RFC1191) special case :
364 * we can receive locally generated ICMP messages while socket is held.
366 if (sock_owned_by_user(sk)) {
367 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
368 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
370 if (sk->sk_state == TCP_CLOSE)
373 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
374 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
380 req = tp->fastopen_rsk;
381 seq = ntohl(th->seq);
382 if (sk->sk_state != TCP_LISTEN &&
383 !between(seq, tp->snd_una, tp->snd_nxt) &&
384 (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
385 /* For a Fast Open socket, allow seq to be snt_isn. */
386 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
392 do_redirect(icmp_skb, sk);
394 case ICMP_SOURCE_QUENCH:
395 /* Just silently ignore these. */
397 case ICMP_PARAMETERPROB:
400 case ICMP_DEST_UNREACH:
401 if (code > NR_ICMP_UNREACH)
404 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
405 /* We are not interested in TCP_LISTEN and open_requests
406 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
407 * they should go through unfragmented).
409 if (sk->sk_state == TCP_LISTEN)
413 if (!sock_owned_by_user(sk)) {
414 tcp_v4_mtu_reduced(sk);
416 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
422 err = icmp_err_convert[code].errno;
423 /* check if icmp_skb allows revert of backoff
424 * (see draft-zimmermann-tcp-lcd) */
425 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
427 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
431 /* XXX (TFO) - revisit the following logic for TFO */
433 if (sock_owned_by_user(sk))
436 icsk->icsk_backoff--;
437 inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
438 TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
441 skb = tcp_write_queue_head(sk);
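/* How much of the reverted RTO is still left, measured from the send
 * timestamp of the head-of-queue skb; if it has already clocked out,
 * retransmit immediately below instead of re-arming the timer.
 */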
444 remaining = icsk->icsk_rto - min(icsk->icsk_rto,
445 tcp_time_stamp - TCP_SKB_CB(skb)->when);
448 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
449 remaining, TCP_RTO_MAX);
451 /* RTO revert clocked out retransmission.
452 * Will retransmit now */
453 tcp_retransmit_timer(sk);
457 case ICMP_TIME_EXCEEDED:
464 /* XXX (TFO) - if it's a TFO socket and has been accepted, rather
465 * than following the TCP_SYN_RECV case and closing the socket,
466 * we ignore the ICMP error and keep trying like a fully established
467 * socket. Is this the right thing to do?
469 if (req && req->sk == NULL)
472 switch (sk->sk_state) {
473 struct request_sock *req, **prev;
475 if (sock_owned_by_user(sk))
478 req = inet_csk_search_req(sk, &prev, th->dest,
479 iph->daddr, iph->saddr);
483 /* ICMPs are not backlogged, hence we cannot get
484 an established socket here.
488 if (seq != tcp_rsk(req)->snt_isn) {
489 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
494 * Still in SYN_RECV, just remove it silently.
495 * There is no good way to pass the error to the newly
496 * created socket, and POSIX does not want network
497 * errors returned from accept().
499 inet_csk_reqsk_queue_drop(sk, req, prev);
500 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
504 case TCP_SYN_RECV: /* Cannot happen.
505 It can, f.e., if SYNs crossed,
506 or Fast Open.
508 if (!sock_owned_by_user(sk)) {
511 sk->sk_error_report(sk);
515 sk->sk_err_soft = err;
520 /* If we've already connected we will keep trying
521 * until we time out, or the user gives up.
523 * rfc1122 4.2.3.9 allows us to consider as hard errors
524 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
525 * but it is obsoleted by pmtu discovery).
527 * Note that in the modern internet, where routing is unreliable
528 * and broken firewalls sit in every dark corner sending random
529 * errors ordered by their masters, even these two messages finally lose
530 * their original sense (even Linux sends invalid PORT_UNREACHs).
532 * Now we are in compliance with RFCs.
537 if (!sock_owned_by_user(sk) && inet->recverr) {
539 sk->sk_error_report(sk);
540 } else { /* Only an error on timeout */
541 sk->sk_err_soft = err;
549 static void __tcp_v4_send_check(struct sk_buff *skb,
550 __be32 saddr, __be32 daddr)
552 struct tcphdr *th = tcp_hdr(skb);
554 if (skb->ip_summed == CHECKSUM_PARTIAL) {
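/* Checksum offload path: seed th->check with the complemented
 * pseudo-header sum and record where the final checksum field lives
 * (csum_start/csum_offset); the NIC, or a software fallback, completes
 * the sum over the TCP header and payload.
 */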
555 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
556 skb->csum_start = skb_transport_header(skb) - skb->head;
557 skb->csum_offset = offsetof(struct tcphdr, check);
559 th->check = tcp_v4_check(skb->len, saddr, daddr,
566 /* This routine computes an IPv4 TCP checksum. */
567 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
569 const struct inet_sock *inet = inet_sk(sk);
571 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
573 EXPORT_SYMBOL(tcp_v4_send_check);
575 int tcp_v4_gso_send_check(struct sk_buff *skb)
577 const struct iphdr *iph;
580 if (!pskb_may_pull(skb, sizeof(*th)))
587 skb->ip_summed = CHECKSUM_PARTIAL;
588 __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
593 * This routine will send an RST to the other tcp.
595 * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)?
597 * Answer: if a packet caused RST, it is not for a socket
598 * existing in our system, if it is matched to a socket,
599 * it is just a duplicate segment or a bug in the other side's TCP.
600 * So we build the reply based only on the parameters
601 * that arrived with the segment.
602 * Exception: precedence violation. We do not implement it in any case.
605 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
607 const struct tcphdr *th = tcp_hdr(skb);
610 #ifdef CONFIG_TCP_MD5SIG
611 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
614 struct ip_reply_arg arg;
615 #ifdef CONFIG_TCP_MD5SIG
616 struct tcp_md5sig_key *key;
617 const __u8 *hash_location = NULL;
618 unsigned char newhash[16];
620 struct sock *sk1 = NULL;
624 /* Never send a reset in response to a reset. */
628 if (skb_rtable(skb)->rt_type != RTN_LOCAL)
631 /* Swap the send and the receive. */
632 memset(&rep, 0, sizeof(rep));
633 rep.th.dest = th->source;
634 rep.th.source = th->dest;
635 rep.th.doff = sizeof(struct tcphdr) / 4;
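/* RFC 793 RST rules: if the offending segment carried an ACK, the RST
 * takes that ack number as its own sequence number; otherwise the RST
 * uses seq 0 and acks everything the segment consumed (its data, plus
 * one sequence number each for SYN and FIN).
 */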
639 rep.th.seq = th->ack_seq;
642 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
643 skb->len - (th->doff << 2));
646 memset(&arg, 0, sizeof(arg));
647 arg.iov[0].iov_base = (unsigned char *)&rep;
648 arg.iov[0].iov_len = sizeof(rep.th);
650 #ifdef CONFIG_TCP_MD5SIG
651 hash_location = tcp_parse_md5sig_option(th);
652 if (!sk && hash_location) {
654 * active side is lost. Try to find the listening socket through
655 * the source port, and then find the md5 key through the listening socket.
656 * We do not lose security here:
657 * the incoming packet is checked against the md5 hash of the found key;
658 * no RST is generated if the md5 hash doesn't match.
660 sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
661 &tcp_hashinfo, ip_hdr(skb)->saddr,
662 th->source, ip_hdr(skb)->daddr,
663 ntohs(th->source), inet_iif(skb));
664 /* don't send rst if it can't find key */
668 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
669 &ip_hdr(skb)->saddr, AF_INET);
673 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
674 if (genhash || memcmp(hash_location, newhash, 16) != 0)
677 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
683 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
685 (TCPOPT_MD5SIG << 8) |
687 /* Update length and the length the header thinks exists */
688 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
689 rep.th.doff = arg.iov[0].iov_len / 4;
691 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
692 key, ip_hdr(skb)->saddr,
693 ip_hdr(skb)->daddr, &rep.th);
696 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
697 ip_hdr(skb)->saddr, /* XXX */
698 arg.iov[0].iov_len, IPPROTO_TCP, 0);
699 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
700 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
701 /* When the socket is gone, all binding information is lost.
702 * Routing might fail in this case. No choice here: if we choose to force
703 * the input interface, we will misroute in case of an asymmetric route.
706 arg.bound_dev_if = sk->sk_bound_dev_if;
708 net = dev_net(skb_dst(skb)->dev);
709 arg.tos = ip_hdr(skb)->tos;
710 ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
711 ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
713 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
714 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
716 #ifdef CONFIG_TCP_MD5SIG
725 /* The code following below, which sends ACKs in SYN-RECV and TIME-WAIT states
726 outside socket context, is ugly, certainly. What can I do?
729 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
730 u32 win, u32 tsval, u32 tsecr, int oif,
731 struct tcp_md5sig_key *key,
732 int reply_flags, u8 tos)
734 const struct tcphdr *th = tcp_hdr(skb);
737 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
738 #ifdef CONFIG_TCP_MD5SIG
739 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
743 struct ip_reply_arg arg;
744 struct net *net = dev_net(skb_dst(skb)->dev);
746 memset(&rep.th, 0, sizeof(struct tcphdr));
747 memset(&arg, 0, sizeof(arg));
749 arg.iov[0].iov_base = (unsigned char *)&rep;
750 arg.iov[0].iov_len = sizeof(rep.th);
752 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
753 (TCPOPT_TIMESTAMP << 8) |
755 rep.opt[1] = htonl(tsval);
756 rep.opt[2] = htonl(tsecr);
757 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
760 /* Swap the send and the receive. */
761 rep.th.dest = th->source;
762 rep.th.source = th->dest;
763 rep.th.doff = arg.iov[0].iov_len / 4;
764 rep.th.seq = htonl(seq);
765 rep.th.ack_seq = htonl(ack);
767 rep.th.window = htons(win);
769 #ifdef CONFIG_TCP_MD5SIG
771 int offset = (tsecr) ? 3 : 0;
773 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
775 (TCPOPT_MD5SIG << 8) |
777 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
778 rep.th.doff = arg.iov[0].iov_len/4;
780 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
781 key, ip_hdr(skb)->saddr,
782 ip_hdr(skb)->daddr, &rep.th);
785 arg.flags = reply_flags;
786 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
787 ip_hdr(skb)->saddr, /* XXX */
788 arg.iov[0].iov_len, IPPROTO_TCP, 0);
789 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
791 arg.bound_dev_if = oif;
793 ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
794 ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
796 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
799 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
801 struct inet_timewait_sock *tw = inet_twsk(sk);
802 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
804 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
805 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
806 tcp_time_stamp + tcptw->tw_ts_offset,
809 tcp_twsk_md5_key(tcptw),
810 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
817 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
818 struct request_sock *req)
820 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
821 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
823 tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
824 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
825 tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
829 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
831 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
836 * Send a SYN-ACK after having received a SYN.
837 * This still operates on a request_sock only, not on a big
838 * socket.
840 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
841 struct request_sock *req,
845 const struct inet_request_sock *ireq = inet_rsk(req);
848 struct sk_buff *skb;
850 /* First, grab a route. */
851 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
854 skb = tcp_make_synack(sk, dst, req, NULL);
857 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
859 skb_set_queue_mapping(skb, queue_mapping);
860 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
863 err = net_xmit_eval(err);
864 if (!tcp_rsk(req)->snt_synack && !err)
865 tcp_rsk(req)->snt_synack = tcp_time_stamp;
871 static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
873 int res = tcp_v4_send_synack(sk, NULL, req, 0, false);
876 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
881 * IPv4 request_sock destructor.
883 static void tcp_v4_reqsk_destructor(struct request_sock *req)
885 kfree(inet_rsk(req)->opt);
889 * Return true if a syncookie should be sent
891 bool tcp_syn_flood_action(struct sock *sk,
892 const struct sk_buff *skb,
895 const char *msg = "Dropping request";
896 bool want_cookie = false;
897 struct listen_sock *lopt;
901 #ifdef CONFIG_SYN_COOKIES
902 if (sysctl_tcp_syncookies) {
903 msg = "Sending cookies";
905 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
908 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
910 lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
911 if (!lopt->synflood_warned) {
912 lopt->synflood_warned = 1;
913 pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
914 proto, ntohs(tcp_hdr(skb)->dest), msg);
918 EXPORT_SYMBOL(tcp_syn_flood_action);
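/* Illustrative sketch (not part of the original file): whether cookies
 * are sent at all is gated by the sysctl_tcp_syncookies read above,
 * exposed to userspace as net.ipv4.tcp_syncookies. A minimal way to
 * flip it on from C; compiled out here.
 */
#if 0
#include <stdio.h>

static int enable_syncookies(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_syncookies", "w");

	if (!f)
		return -1;
	fputs("1\n", f);
	return fclose(f);
}
#endif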
921 * Save and compile IPv4 options into the request_sock if needed.
923 static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
925 const struct ip_options *opt = &(IPCB(skb)->opt);
926 struct ip_options_rcu *dopt = NULL;
928 if (opt && opt->optlen) {
929 int opt_size = sizeof(*dopt) + opt->optlen;
931 dopt = kmalloc(opt_size, GFP_ATOMIC);
933 if (ip_options_echo(&dopt->opt, skb)) {
942 #ifdef CONFIG_TCP_MD5SIG
944 * RFC2385 MD5 checksumming requires a mapping of
945 * IP address->MD5 Key.
946 * We need to maintain these in the sk structure.
949 /* Find the Key structure for an address. */
950 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
951 const union tcp_md5_addr *addr,
954 struct tcp_sock *tp = tcp_sk(sk);
955 struct tcp_md5sig_key *key;
956 unsigned int size = sizeof(struct in_addr);
957 struct tcp_md5sig_info *md5sig;
959 /* caller either holds rcu_read_lock() or socket lock */
960 md5sig = rcu_dereference_check(tp->md5sig_info,
961 sock_owned_by_user(sk) ||
962 lockdep_is_held(&sk->sk_lock.slock));
965 #if IS_ENABLED(CONFIG_IPV6)
966 if (family == AF_INET6)
967 size = sizeof(struct in6_addr);
969 hlist_for_each_entry_rcu(key, &md5sig->head, node) {
970 if (key->family != family)
972 if (!memcmp(&key->addr, addr, size))
977 EXPORT_SYMBOL(tcp_md5_do_lookup);
979 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
980 struct sock *addr_sk)
982 union tcp_md5_addr *addr;
984 addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
985 return tcp_md5_do_lookup(sk, addr, AF_INET);
987 EXPORT_SYMBOL(tcp_v4_md5_lookup);
989 static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
990 struct request_sock *req)
992 union tcp_md5_addr *addr;
994 addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
995 return tcp_md5_do_lookup(sk, addr, AF_INET);
998 /* This can be called on a newly created socket, from other files */
999 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1000 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
1002 /* Add Key to the list */
1003 struct tcp_md5sig_key *key;
1004 struct tcp_sock *tp = tcp_sk(sk);
1005 struct tcp_md5sig_info *md5sig;
1007 key = tcp_md5_do_lookup(sk, addr, family);
1009 /* Pre-existing entry - just update that one. */
1010 memcpy(key->key, newkey, newkeylen);
1011 key->keylen = newkeylen;
1015 md5sig = rcu_dereference_protected(tp->md5sig_info,
1016 sock_owned_by_user(sk));
1018 md5sig = kmalloc(sizeof(*md5sig), gfp);
1022 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1023 INIT_HLIST_HEAD(&md5sig->head);
1024 rcu_assign_pointer(tp->md5sig_info, md5sig);
1027 key = sock_kmalloc(sk, sizeof(*key), gfp);
1030 if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
1031 sock_kfree_s(sk, key, sizeof(*key));
1035 memcpy(key->key, newkey, newkeylen);
1036 key->keylen = newkeylen;
1037 key->family = family;
1038 memcpy(&key->addr, addr,
1039 (family == AF_INET6) ? sizeof(struct in6_addr) :
1040 sizeof(struct in_addr));
1041 hlist_add_head_rcu(&key->node, &md5sig->head);
1044 EXPORT_SYMBOL(tcp_md5_do_add);
1046 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1048 struct tcp_sock *tp = tcp_sk(sk);
1049 struct tcp_md5sig_key *key;
1050 struct tcp_md5sig_info *md5sig;
1052 key = tcp_md5_do_lookup(sk, addr, family);
1055 hlist_del_rcu(&key->node);
1056 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1057 kfree_rcu(key, rcu);
1058 md5sig = rcu_dereference_protected(tp->md5sig_info,
1059 sock_owned_by_user(sk));
1060 if (hlist_empty(&md5sig->head))
1061 tcp_free_md5sig_pool();
1064 EXPORT_SYMBOL(tcp_md5_do_del);
1066 static void tcp_clear_md5_list(struct sock *sk)
1068 struct tcp_sock *tp = tcp_sk(sk);
1069 struct tcp_md5sig_key *key;
1070 struct hlist_node *n;
1071 struct tcp_md5sig_info *md5sig;
1073 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1075 if (!hlist_empty(&md5sig->head))
1076 tcp_free_md5sig_pool();
1077 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1078 hlist_del_rcu(&key->node);
1079 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1080 kfree_rcu(key, rcu);
1084 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1087 struct tcp_md5sig cmd;
1088 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1090 if (optlen < sizeof(cmd))
1093 if (copy_from_user(&cmd, optval, sizeof(cmd)))
1096 if (sin->sin_family != AF_INET)
1099 if (!cmd.tcpm_key || !cmd.tcpm_keylen)
1100 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1103 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1106 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1107 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1108 GFP_KERNEL);
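/* Illustrative sketch (not part of the original file): the parse routine
 * above is the kernel end of the TCP_MD5SIG socket option. A userspace
 * caller installs a per-peer key roughly like this, assuming the
 * struct tcp_md5sig layout from linux/tcp.h of this era; compiled out.
 */
#if 0
#include <linux/tcp.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int set_md5_key(int fd, const struct sockaddr_in *peer,
		       const void *key, int keylen)
{
	struct tcp_md5sig md5;

	if (keylen > TCP_MD5SIG_MAXKEYLEN)
		return -1;
	memset(&md5, 0, sizeof(md5));
	memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
	md5.tcpm_keylen = keylen;
	memcpy(md5.tcpm_key, key, keylen);
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
#endif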
1111 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1112 __be32 daddr, __be32 saddr, int nbytes)
1114 struct tcp4_pseudohdr *bp;
1115 struct scatterlist sg;
1117 bp = &hp->md5_blk.ip4;
1120 * 1. the TCP pseudo-header (in the order: source IP address,
1121 * destination IP address, zero-padded protocol number, and
1122 * segment length)
1127 bp->protocol = IPPROTO_TCP;
1128 bp->len = cpu_to_be16(nbytes);
1130 sg_init_one(&sg, bp, sizeof(*bp));
1131 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
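/* For reference, the block hashed above is struct tcp4_pseudohdr; in
 * kernels of this vintage it is laid out in include/net/tcp.h roughly
 * as follows (reproduced as a sketch, not a redefinition):
 *
 *	struct tcp4_pseudohdr {
 *		__be32	saddr;
 *		__be32	daddr;
 *		__u8	pad;
 *		__u8	protocol;
 *		__be16	len;
 *	};
 */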
1134 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1135 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1137 struct tcp_md5sig_pool *hp;
1138 struct hash_desc *desc;
1140 hp = tcp_get_md5sig_pool();
1142 goto clear_hash_noput;
1143 desc = &hp->md5_desc;
1145 if (crypto_hash_init(desc))
1147 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1149 if (tcp_md5_hash_header(hp, th))
1151 if (tcp_md5_hash_key(hp, key))
1153 if (crypto_hash_final(desc, md5_hash))
1156 tcp_put_md5sig_pool();
1160 tcp_put_md5sig_pool();
1162 memset(md5_hash, 0, 16);
1166 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1167 const struct sock *sk, const struct request_sock *req,
1168 const struct sk_buff *skb)
1170 struct tcp_md5sig_pool *hp;
1171 struct hash_desc *desc;
1172 const struct tcphdr *th = tcp_hdr(skb);
1173 __be32 saddr, daddr;
1176 saddr = inet_sk(sk)->inet_saddr;
1177 daddr = inet_sk(sk)->inet_daddr;
1179 saddr = inet_rsk(req)->loc_addr;
1180 daddr = inet_rsk(req)->rmt_addr;
1182 const struct iphdr *iph = ip_hdr(skb);
1187 hp = tcp_get_md5sig_pool();
1189 goto clear_hash_noput;
1190 desc = &hp->md5_desc;
1192 if (crypto_hash_init(desc))
1195 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1197 if (tcp_md5_hash_header(hp, th))
1199 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1201 if (tcp_md5_hash_key(hp, key))
1203 if (crypto_hash_final(desc, md5_hash))
1206 tcp_put_md5sig_pool();
1210 tcp_put_md5sig_pool();
1212 memset(md5_hash, 0, 16);
1215 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1217 static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1220 * This gets called for each TCP segment that arrives
1221 * so we want to be efficient.
1222 * We have 3 drop cases:
1223 * o No MD5 hash and one expected.
1224 * o MD5 hash and we're not expecting one.
1225 * o MD5 hash and it's wrong.
1227 const __u8 *hash_location = NULL;
1228 struct tcp_md5sig_key *hash_expected;
1229 const struct iphdr *iph = ip_hdr(skb);
1230 const struct tcphdr *th = tcp_hdr(skb);
1232 unsigned char newhash[16];
1234 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1236 hash_location = tcp_parse_md5sig_option(th);
1238 /* We've parsed the options - do we have a hash? */
1239 if (!hash_expected && !hash_location)
1242 if (hash_expected && !hash_location) {
1243 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1247 if (!hash_expected && hash_location) {
1248 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1252 /* Okay, so this is hash_expected and hash_location -
1253 * so we need to calculate the checksum.
1255 genhash = tcp_v4_md5_hash_skb(newhash,
1259 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1260 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1261 &iph->saddr, ntohs(th->source),
1262 &iph->daddr, ntohs(th->dest),
1263 genhash ? " tcp_v4_calc_md5_hash failed"
1272 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1274 .obj_size = sizeof(struct tcp_request_sock),
1275 .rtx_syn_ack = tcp_v4_rtx_synack,
1276 .send_ack = tcp_v4_reqsk_send_ack,
1277 .destructor = tcp_v4_reqsk_destructor,
1278 .send_reset = tcp_v4_send_reset,
1279 .syn_ack_timeout = tcp_syn_ack_timeout,
1282 #ifdef CONFIG_TCP_MD5SIG
1283 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1284 .md5_lookup = tcp_v4_reqsk_md5_lookup,
1285 .calc_md5_hash = tcp_v4_md5_hash_skb,
1289 static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
1290 struct request_sock *req,
1291 struct tcp_fastopen_cookie *foc,
1292 struct tcp_fastopen_cookie *valid_foc)
1294 bool skip_cookie = false;
1295 struct fastopen_queue *fastopenq;
1297 if (likely(!fastopen_cookie_present(foc))) {
1298 /* See include/net/tcp.h for the meaning of these knobs */
1299 if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
1300 ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
1301 (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
1302 skip_cookie = true; /* no cookie to validate */
1306 fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
1307 /* A FO option is present; bump the counter. */
1308 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);
1310 /* Make sure the listener has enabled fastopen, and we don't
1311 * exceed the max # of pending TFO requests allowed before trying
1312 * to validate the cookie, in order to avoid burning CPU cycles
1315 * XXX (TFO) - The implication of checking the max_qlen before
1316 * processing a cookie request is that clients can't differentiate
1317 * between qlen overflow causing Fast Open to be disabled
1318 * temporarily vs a server not supporting Fast Open at all.
1320 if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
1321 fastopenq == NULL || fastopenq->max_qlen == 0)
1324 if (fastopenq->qlen >= fastopenq->max_qlen) {
1325 struct request_sock *req1;
1326 spin_lock(&fastopenq->lock);
1327 req1 = fastopenq->rskq_rst_head;
1328 if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
1329 spin_unlock(&fastopenq->lock);
1330 NET_INC_STATS_BH(sock_net(sk),
1331 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
1332 /* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
1336 fastopenq->rskq_rst_head = req1->dl_next;
1338 spin_unlock(&fastopenq->lock);
1342 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1345 if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
1346 if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
1347 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1348 if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
1349 memcmp(&foc->val[0], &valid_foc->val[0],
1350 TCP_FASTOPEN_COOKIE_SIZE) != 0)
1352 valid_foc->len = -1;
1354 /* Acknowledge the data received from the peer. */
1355 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1357 } else if (foc->len == 0) { /* Client requesting a cookie */
1358 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1359 NET_INC_STATS_BH(sock_net(sk),
1360 LINUX_MIB_TCPFASTOPENCOOKIEREQD);
1362 /* Client sent a cookie with the wrong size. Treat it
1363 * the same as invalid and return a valid one.
1365 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
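/* Illustrative sketch (not part of the original file): the server-side
 * knobs checked above are driven by the TCP_FASTOPEN socket option,
 * whose value bounds the pending-TFO queue (fastopenq->max_qlen). A
 * minimal listener setup, assuming a libc that defines TCP_FASTOPEN;
 * compiled out here.
 */
#if 0
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int enable_tfo(int listen_fd)
{
	int qlen = 16;	/* max outstanding TFO requests */

	return setsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN,
			  &qlen, sizeof(qlen));
}
#endif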
1370 static int tcp_v4_conn_req_fastopen(struct sock *sk,
1371 struct sk_buff *skb,
1372 struct sk_buff *skb_synack,
1373 struct request_sock *req)
1375 struct tcp_sock *tp = tcp_sk(sk);
1376 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
1377 const struct inet_request_sock *ireq = inet_rsk(req);
1381 req->num_retrans = 0;
1382 req->num_timeout = 0;
1385 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
1386 if (child == NULL) {
1387 NET_INC_STATS_BH(sock_net(sk),
1388 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1389 kfree_skb(skb_synack);
1392 err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
1393 ireq->rmt_addr, ireq->opt);
1394 err = net_xmit_eval(err);
1396 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1397 /* XXX (TFO) - is it ok to ignore error and continue? */
1399 spin_lock(&queue->fastopenq->lock);
1400 queue->fastopenq->qlen++;
1401 spin_unlock(&queue->fastopenq->lock);
1403 /* Initialize the child socket. Have to fix some values to take
1404 * into account the child is a Fast Open socket and is created
1405 * only out of the bits carried in the SYN packet.
1409 tp->fastopen_rsk = req;
1410 /* Do a hold on the listener sk so that if the listener is being
1411 * closed, the child that has been accepted can live on and still
1412 * access listen_lock.
1415 tcp_rsk(req)->listener = sk;
1417 /* RFC1323: The window in SYN & SYN/ACK segments is never
1418 * scaled. So correct it appropriately.
1420 tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
1422 /* Activate the retrans timer so that SYNACK can be retransmitted.
1423 * The request socket is not added to the SYN table of the parent
1424 * because it's been added to the accept queue directly.
1426 inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
1427 TCP_TIMEOUT_INIT, TCP_RTO_MAX);
1429 /* Add the child socket directly into the accept queue */
1430 inet_csk_reqsk_queue_add(sk, req, child);
1432 /* Now finish processing the fastopen child socket. */
1433 inet_csk(child)->icsk_af_ops->rebuild_header(child);
1434 tcp_init_congestion_control(child);
1435 tcp_mtup_init(child);
1436 tcp_init_buffer_space(child);
1437 tcp_init_metrics(child);
1439 /* Queue the data carried in the SYN packet. We need to first
1440 * bump skb's refcnt because the caller will attempt to free it.
1442 * XXX (TFO) - we honor a zero-payload TFO request for now.
1443 * (Any reason not to?)
1445 if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
1446 /* Don't queue the skb if there is no payload in SYN.
1447 * XXX (TFO) - How about SYN+FIN?
1449 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1453 __skb_pull(skb, tcp_hdr(skb)->doff * 4);
1454 skb_set_owner_r(skb, child);
1455 __skb_queue_tail(&child->sk_receive_queue, skb);
1456 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1457 tp->syn_data_acked = 1;
1459 sk->sk_data_ready(sk, 0);
1460 bh_unlock_sock(child);
1462 WARN_ON(req->sk == NULL);
1466 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1468 struct tcp_options_received tmp_opt;
1469 struct request_sock *req;
1470 struct inet_request_sock *ireq;
1471 struct tcp_sock *tp = tcp_sk(sk);
1472 struct dst_entry *dst = NULL;
1473 __be32 saddr = ip_hdr(skb)->saddr;
1474 __be32 daddr = ip_hdr(skb)->daddr;
1475 __u32 isn = TCP_SKB_CB(skb)->when;
1476 bool want_cookie = false;
1478 struct tcp_fastopen_cookie foc = { .len = -1 };
1479 struct tcp_fastopen_cookie valid_foc = { .len = -1 };
1480 struct sk_buff *skb_synack;
1483 /* Never answer SYNs sent to broadcast or multicast */
1484 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1487 /* TW buckets are converted to open requests without
1488 * limitations, they conserve resources and the peer is
1489 * evidently a real one.
1491 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1492 want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
1497 /* Accept backlog is full. If we have already queued enough
1498 * of warm entries in syn queue, drop request. It is better than
1499 * clogging syn queue with openreqs with exponentially increasing
1500 * timeout.
1502 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
1503 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1507 req = inet_reqsk_alloc(&tcp_request_sock_ops);
1511 #ifdef CONFIG_TCP_MD5SIG
1512 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1515 tcp_clear_options(&tmp_opt);
1516 tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1517 tmp_opt.user_mss = tp->rx_opt.user_mss;
1518 tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
1520 if (want_cookie && !tmp_opt.saw_tstamp)
1521 tcp_clear_options(&tmp_opt);
1523 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1524 tcp_openreq_init(req, &tmp_opt, skb);
1526 ireq = inet_rsk(req);
1527 ireq->loc_addr = daddr;
1528 ireq->rmt_addr = saddr;
1529 ireq->no_srccheck = inet_sk(sk)->transparent;
1530 ireq->opt = tcp_v4_save_options(skb);
1531 ireq->ir_mark = inet_request_mark(sk, skb);
1533 if (security_inet_conn_request(sk, skb, req))
1536 if (!want_cookie || tmp_opt.tstamp_ok)
1537 TCP_ECN_create_request(req, skb, sock_net(sk));
1540 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1541 req->cookie_ts = tmp_opt.tstamp_ok;
1543 /* VJ's idea. We save last timestamp seen
1544 * from the destination in peer table, when entering
1545 * state TIME-WAIT, and check against it before
1546 * accepting new connection request.
1548 * If "isn" is not zero, this request hit an alive
1549 * timewait bucket, so all the necessary checks
1550 * are made in the function processing timewait state.
1552 if (tmp_opt.saw_tstamp &&
1553 tcp_death_row.sysctl_tw_recycle &&
1554 (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
1555 fl4.daddr == saddr) {
1556 if (!tcp_peer_is_proven(req, dst, true)) {
1557 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1558 goto drop_and_release;
1561 /* Kill the following clause, if you dislike this way. */
1562 else if (!sysctl_tcp_syncookies &&
1563 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1564 (sysctl_max_syn_backlog >> 2)) &&
1565 !tcp_peer_is_proven(req, dst, false)) {
1566 /* Without syncookies the last quarter of the
1567 * backlog is filled with destinations
1568 * proven to be alive.
1569 * It means that we continue to communicate
1570 * with destinations already remembered
1571 * at the moment of the synflood.
1573 LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
1574 &saddr, ntohs(tcp_hdr(skb)->source));
1575 goto drop_and_release;
1578 isn = tcp_v4_init_sequence(skb);
1580 tcp_rsk(req)->snt_isn = isn;
1583 dst = inet_csk_route_req(sk, &fl4, req);
1587 do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);
1589 /* We don't call tcp_v4_send_synack() directly because we need
1590 * to make sure a child socket can be created successfully before
1591 * sending back synack!
1593 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
1594 * (or better yet, call tcp_send_synack() in the child context
1595 * directly, but will have to fix bunch of other code first)
1596 * after syn_recv_sock() except one will need to first fix the
1597 * latter to remove its dependency on the current implementation
1598 * of tcp_v4_send_synack()->tcp_select_initial_window().
1600 skb_synack = tcp_make_synack(sk, dst, req,
1601 fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
1604 __tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
1605 skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
1609 if (likely(!do_fastopen)) {
1611 err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
1612 ireq->rmt_addr, ireq->opt);
1613 err = net_xmit_eval(err);
1614 if (err || want_cookie)
1617 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1618 tcp_rsk(req)->listener = NULL;
1619 /* Add the request_sock to the SYN table */
1620 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1621 if (fastopen_cookie_present(&foc) && foc.len != 0)
1622 NET_INC_STATS_BH(sock_net(sk),
1623 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1624 } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req))
1634 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1637 EXPORT_SYMBOL(tcp_v4_conn_request);
1641 * The three-way handshake has completed - we got a valid synack -
1642 * now create the new socket.
1644 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1645 struct request_sock *req,
1646 struct dst_entry *dst)
1648 struct inet_request_sock *ireq;
1649 struct inet_sock *newinet;
1650 struct tcp_sock *newtp;
1652 #ifdef CONFIG_TCP_MD5SIG
1653 struct tcp_md5sig_key *key;
1655 struct ip_options_rcu *inet_opt;
1657 if (sk_acceptq_is_full(sk))
1660 newsk = tcp_create_openreq_child(sk, req, skb);
1664 newsk->sk_gso_type = SKB_GSO_TCPV4;
1665 inet_sk_rx_dst_set(newsk, skb);
1667 newtp = tcp_sk(newsk);
1668 newinet = inet_sk(newsk);
1669 ireq = inet_rsk(req);
1670 newinet->inet_daddr = ireq->rmt_addr;
1671 newinet->inet_rcv_saddr = ireq->loc_addr;
1672 newinet->inet_saddr = ireq->loc_addr;
1673 inet_opt = ireq->opt;
1674 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1676 newinet->mc_index = inet_iif(skb);
1677 newinet->mc_ttl = ip_hdr(skb)->ttl;
1678 newinet->rcv_tos = ip_hdr(skb)->tos;
1679 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1681 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1682 newinet->inet_id = newtp->write_seq ^ jiffies;
1685 dst = inet_csk_route_child_sock(sk, newsk, req);
1689 /* syncookie case : see end of cookie_v4_check() */
1691 sk_setup_caps(newsk, dst);
1693 tcp_mtup_init(newsk);
1694 tcp_sync_mss(newsk, dst_mtu(dst));
1695 newtp->advmss = dst_metric_advmss(dst);
1696 if (tcp_sk(sk)->rx_opt.user_mss &&
1697 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1698 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1700 tcp_initialize_rcv_mss(newsk);
1701 tcp_synack_rtt_meas(newsk, req);
1702 newtp->total_retrans = req->num_retrans;
1704 #ifdef CONFIG_TCP_MD5SIG
1705 /* Copy over the MD5 key from the original socket */
1706 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1710 * We're using one, so create a matching key
1711 * on the newsk structure. If we fail to get
1712 * memory, then we end up not copying the key
1713 * across. Shucks.
1715 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1716 AF_INET, key->key, key->keylen, GFP_ATOMIC);
1717 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1721 if (__inet_inherit_port(sk, newsk) < 0)
1723 __inet_hash_nolisten(newsk, NULL);
1728 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1732 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1735 inet_csk_prepare_forced_close(newsk);
1739 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1741 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1743 struct tcphdr *th = tcp_hdr(skb);
1744 const struct iphdr *iph = ip_hdr(skb);
1746 struct request_sock **prev;
1747 /* Find possible connection requests. */
1748 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1749 iph->saddr, iph->daddr);
1751 return tcp_check_req(sk, skb, req, prev, false);
1753 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1754 th->source, iph->daddr, th->dest, inet_iif(skb));
1757 if (nsk->sk_state != TCP_TIME_WAIT) {
1761 inet_twsk_put(inet_twsk(nsk));
1765 #ifdef CONFIG_SYN_COOKIES
1767 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1772 static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1774 const struct iphdr *iph = ip_hdr(skb);
1776 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1777 if (!tcp_v4_check(skb->len, iph->saddr,
1778 iph->daddr, skb->csum)) {
1779 skb->ip_summed = CHECKSUM_UNNECESSARY;
1784 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1785 skb->len, IPPROTO_TCP, 0);
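/* Short segments are cheap enough to verify right away; for anything
 * longer the check is deferred so it can be folded into a later pass
 * (e.g. the copy to userspace).
 */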
1787 if (skb->len <= 76) {
1788 return __skb_checksum_complete(skb);
1794 /* The socket must have its spinlock held when we get
1795 * here.
1797 * We have a potential double-lock case here, so even when
1798 * doing backlog processing we use the BH locking scheme.
1799 * This is because we cannot sleep with the original spinlock
1802 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1805 #ifdef CONFIG_TCP_MD5SIG
1807 * We really want to reject the packet as early as possible
1808 * when:
1809 * o We're expecting an MD5'd packet and this is no MD5 tcp option
1810 * o There is an MD5 option and we're not expecting one
1812 if (tcp_v4_inbound_md5_hash(sk, skb))
1816 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1817 struct dst_entry *dst = sk->sk_rx_dst;
1819 sock_rps_save_rxhash(sk, skb);
1821 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1822 dst->ops->check(dst, 0) == NULL) {
1824 sk->sk_rx_dst = NULL;
1827 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1834 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1837 if (sk->sk_state == TCP_LISTEN) {
1838 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1843 sock_rps_save_rxhash(nsk, skb);
1844 if (tcp_child_process(sk, nsk, skb)) {
1851 sock_rps_save_rxhash(sk, skb);
1853 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1860 tcp_v4_send_reset(rsk, skb);
1863 /* Be careful here. If this function gets more complicated and
1864 * gcc suffers from register pressure on the x86, sk (in %ebx)
1865 * might be destroyed here. This current version compiles correctly,
1866 * but you have been warned.
1871 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1872 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1875 EXPORT_SYMBOL(tcp_v4_do_rcv);
1877 void tcp_v4_early_demux(struct sk_buff *skb)
1879 const struct iphdr *iph;
1880 const struct tcphdr *th;
1883 if (skb->pkt_type != PACKET_HOST)
1886 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1892 if (th->doff < sizeof(struct tcphdr) / 4)
1895 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1896 iph->saddr, th->source,
1897 iph->daddr, ntohs(th->dest),
1901 skb->destructor = sock_edemux;
1902 if (sk->sk_state != TCP_TIME_WAIT) {
1903 struct dst_entry *dst = sk->sk_rx_dst;
1906 dst = dst_check(dst, 0);
1908 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1909 skb_dst_set_noref(skb, dst);
1914 /* Packet is added to VJ-style prequeue for processing in process
1915 * context, if a reader task is waiting. Apparently, this exciting
1916 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1917 * failed somewhere. Latency? Burstiness? Well, at least now we will
1918 * see why it failed. 8)8) --ANK
1921 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1923 struct tcp_sock *tp = tcp_sk(sk);
1925 if (sysctl_tcp_low_latency || !tp->ucopy.task)
1928 if (skb->len <= tcp_hdrlen(skb) &&
1929 skb_queue_len(&tp->ucopy.prequeue) == 0)
1930 return false;
1933 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1934 tp->ucopy.memory += skb->truesize;
1935 if (tp->ucopy.memory > sk->sk_rcvbuf) {
1936 struct sk_buff *skb1;
1938 BUG_ON(sock_owned_by_user(sk));
1940 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1941 sk_backlog_rcv(sk, skb1);
1942 NET_INC_STATS_BH(sock_net(sk),
1943 LINUX_MIB_TCPPREQUEUEDROPPED);
1946 tp->ucopy.memory = 0;
1947 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1948 wake_up_interruptible_sync_poll(sk_sleep(sk),
1949 POLLIN | POLLRDNORM | POLLRDBAND);
1950 if (!inet_csk_ack_scheduled(sk))
1951 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1952 (3 * tcp_rto_min(sk)) / 4,
1957 EXPORT_SYMBOL(tcp_prequeue);
1963 int tcp_v4_rcv(struct sk_buff *skb)
1965 const struct iphdr *iph;
1966 const struct tcphdr *th;
1969 struct net *net = dev_net(skb->dev);
1971 if (skb->pkt_type != PACKET_HOST)
1974 /* Count it even if it's bad */
1975 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1977 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1982 if (th->doff < sizeof(struct tcphdr) / 4)
1984 if (!pskb_may_pull(skb, th->doff * 4))
1987 /* An explanation is required here, I think.
1988 * Packet length and doff are validated by header prediction,
1989 * provided the case of th->doff == 0 is eliminated.
1990 * So, we defer the checks. */
1991 if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1992 goto csum_error;
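/* Cache the header fields the state machine needs in the skb's control
 * block; end_seq covers the payload plus one sequence number each for
 * SYN and FIN.
 */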
1996 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1997 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1998 skb->len - th->doff * 4);
1999 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
2000 TCP_SKB_CB(skb)->when = 0;
2001 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
2002 TCP_SKB_CB(skb)->sacked = 0;
2004 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
2009 if (sk->sk_state == TCP_TIME_WAIT)
2012 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
2013 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
2014 goto discard_and_relse;
2017 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
2018 goto discard_and_relse;
2021 if (sk_filter(sk, skb))
2022 goto discard_and_relse;
2026 bh_lock_sock_nested(sk);
2028 if (!sock_owned_by_user(sk)) {
2029 #ifdef CONFIG_NET_DMA
2030 struct tcp_sock *tp = tcp_sk(sk);
2031 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
2032 tp->ucopy.dma_chan = net_dma_find_channel();
2033 if (tp->ucopy.dma_chan)
2034 ret = tcp_v4_do_rcv(sk, skb);
2038 if (!tcp_prequeue(sk, skb))
2039 ret = tcp_v4_do_rcv(sk, skb);
2041 } else if (unlikely(sk_add_backlog(sk, skb,
2042 sk->sk_rcvbuf + sk->sk_sndbuf))) {
2044 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
2045 goto discard_and_relse;
2054 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2057 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
2059 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
2061 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
2063 tcp_v4_send_reset(NULL, skb);
2067 /* Discard frame. */
2076 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
2077 inet_twsk_put(inet_twsk(sk));
2081 if (skb->len < (th->doff << 2)) {
2082 inet_twsk_put(inet_twsk(sk));
2085 if (tcp_checksum_complete(skb)) {
2086 inet_twsk_put(inet_twsk(sk));
2089 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
2091 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
2093 iph->saddr, th->source,
2094 iph->daddr, th->dest,
2097 inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
2098 inet_twsk_put(inet_twsk(sk));
2102 /* Fall through to ACK */
2105 tcp_v4_timewait_ack(sk, skb);
2109 case TCP_TW_SUCCESS:;
2114 static struct timewait_sock_ops tcp_timewait_sock_ops = {
2115 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
2116 .twsk_unique = tcp_twsk_unique,
2117 .twsk_destructor= tcp_twsk_destructor,
2120 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
2122 struct dst_entry *dst = skb_dst(skb);
2125 sk->sk_rx_dst = dst;
2126 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
2128 EXPORT_SYMBOL(inet_sk_rx_dst_set);
2130 const struct inet_connection_sock_af_ops ipv4_specific = {
2131 .queue_xmit = ip_queue_xmit,
2132 .send_check = tcp_v4_send_check,
2133 .rebuild_header = inet_sk_rebuild_header,
2134 .sk_rx_dst_set = inet_sk_rx_dst_set,
2135 .conn_request = tcp_v4_conn_request,
2136 .syn_recv_sock = tcp_v4_syn_recv_sock,
2137 .net_header_len = sizeof(struct iphdr),
2138 .setsockopt = ip_setsockopt,
2139 .getsockopt = ip_getsockopt,
2140 .addr2sockaddr = inet_csk_addr2sockaddr,
2141 .sockaddr_len = sizeof(struct sockaddr_in),
2142 .bind_conflict = inet_csk_bind_conflict,
2143 #ifdef CONFIG_COMPAT
2144 .compat_setsockopt = compat_ip_setsockopt,
2145 .compat_getsockopt = compat_ip_getsockopt,
2147 .mtu_reduced = tcp_v4_mtu_reduced,
2149 EXPORT_SYMBOL(ipv4_specific);
2151 #ifdef CONFIG_TCP_MD5SIG
2152 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
2153 .md5_lookup = tcp_v4_md5_lookup,
2154 .calc_md5_hash = tcp_v4_md5_hash_skb,
2155 .md5_parse = tcp_v4_parse_md5_keys,
2159 /* NOTE: A lot of things are set to zero explicitly by the call to
2160 * sk_alloc(), so they need not be done here.
2162 static int tcp_v4_init_sock(struct sock *sk)
2164 struct inet_connection_sock *icsk = inet_csk(sk);
2168 icsk->icsk_af_ops = &ipv4_specific;
2170 #ifdef CONFIG_TCP_MD5SIG
2171 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2177 void tcp_v4_destroy_sock(struct sock *sk)
2179 struct tcp_sock *tp = tcp_sk(sk);
2181 tcp_clear_xmit_timers(sk);
2183 tcp_cleanup_congestion_control(sk);
2185 /* Clean up the write buffer. */
2186 tcp_write_queue_purge(sk);
2188 /* Cleans up our, hopefully empty, out_of_order_queue. */
2189 __skb_queue_purge(&tp->out_of_order_queue);
2191 #ifdef CONFIG_TCP_MD5SIG
2192 /* Clean up the MD5 key list, if any */
2193 if (tp->md5sig_info) {
2194 tcp_clear_md5_list(sk);
2195 kfree_rcu(tp->md5sig_info, rcu);
2196 tp->md5sig_info = NULL;
2200 #ifdef CONFIG_NET_DMA
2201 /* Cleans up our sk_async_wait_queue */
2202 __skb_queue_purge(&sk->sk_async_wait_queue);
2205 /* Clean the prequeue; it really must be empty */
2206 __skb_queue_purge(&tp->ucopy.prequeue);
2208 /* Clean up a referenced TCP bind bucket. */
2209 if (inet_csk(sk)->icsk_bind_hash)
2212 BUG_ON(tp->fastopen_rsk != NULL);
2214 /* If the socket was aborted during the connect operation */
2215 tcp_free_fastopen_req(tp);
2217 sk_sockets_allocated_dec(sk);
2218 sock_release_memcg(sk);
2220 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2222 #ifdef CONFIG_PROC_FS
2223 /* Proc filesystem TCP sock list dumping. */
2225 static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
2227 return hlist_nulls_empty(head) ? NULL :
2228 list_entry(head->first, struct inet_timewait_sock, tw_node);
2231 static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
2233 return !is_a_nulls(tw->tw_node.next) ?
2234 hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
2238 * Get the next listener socket following cur. If cur is NULL, get the first socket
2239 * starting from bucket given in st->bucket; when st->bucket is zero the
2240 * very first socket in the hash table is returned.
2242 static void *listening_get_next(struct seq_file *seq, void *cur)
2244 struct inet_connection_sock *icsk;
2245 struct hlist_nulls_node *node;
2246 struct sock *sk = cur;
2247 struct inet_listen_hashbucket *ilb;
2248 struct tcp_iter_state *st = seq->private;
2249 struct net *net = seq_file_net(seq);
2252 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2253 spin_lock_bh(&ilb->lock);
2254 sk = sk_nulls_head(&ilb->head);
2258 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2262 if (st->state == TCP_SEQ_STATE_OPENREQ) {
2263 struct request_sock *req = cur;
2265 icsk = inet_csk(st->syn_wait_sk);
2269 if (req->rsk_ops->family == st->family) {
2275 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
2278 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2280 sk = sk_nulls_next(st->syn_wait_sk);
2281 st->state = TCP_SEQ_STATE_LISTENING;
2282 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2284 icsk = inet_csk(sk);
2285 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2286 if (reqsk_queue_len(&icsk->icsk_accept_queue))
2288 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2289 sk = sk_nulls_next(sk);
2292 sk_nulls_for_each_from(sk, node) {
2293 if (!net_eq(sock_net(sk), net))
2295 if (sk->sk_family == st->family) {
2299 icsk = inet_csk(sk);
2300 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2301 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2303 st->uid = sock_i_uid(sk);
2304 st->syn_wait_sk = sk;
2305 st->state = TCP_SEQ_STATE_OPENREQ;
2309 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2311 spin_unlock_bh(&ilb->lock);
2313 if (++st->bucket < INET_LHTABLE_SIZE) {
2314 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2315 spin_lock_bh(&ilb->lock);
2316 sk = sk_nulls_head(&ilb->head);
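
/* Iteration order, then: per listening-hash bucket, each listener of
 * the requested family is visited, and for each listener its pending
 * open requests (SYN_RECV minisocks) are walked while st->state is
 * TCP_SEQ_STATE_OPENREQ.  The bucket lock (plus syn_wait_lock in the
 * OPENREQ state) is held between ->next calls and dropped in
 * tcp_seq_stop().
 */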

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
        struct tcp_iter_state *st = seq->private;
        void *rc;

        st->bucket = 0;
        st->offset = 0;
        rc = listening_get_next(seq, NULL);

        while (rc && *pos) {
                rc = listening_get_next(seq, rc);
                --*pos;
        }
        return rc;
}

static inline bool empty_bucket(struct tcp_iter_state *st)
{
        return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
                hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}
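
/* The emptiness check is done without the bucket lock; a racing insert
 * can be missed, but a /proc dump is only ever a best-effort snapshot,
 * and skipping the lock for the (common) empty buckets is what keeps
 * full-table scans cheap.
 */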

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
        struct tcp_iter_state *st = seq->private;
        struct net *net = seq_file_net(seq);
        void *rc = NULL;

        st->offset = 0;
        for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
                struct sock *sk;
                struct hlist_nulls_node *node;
                struct inet_timewait_sock *tw;
                spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

                /* Lockless fast path for the common case of empty buckets */
                if (empty_bucket(st))
                        continue;

                spin_lock_bh(lock);
                sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
                        if (sk->sk_family != st->family ||
                            !net_eq(sock_net(sk), net)) {
                                continue;
                        }
                        rc = sk;
                        goto out;
                }
                st->state = TCP_SEQ_STATE_TIME_WAIT;
                inet_twsk_for_each(tw, node,
                                   &tcp_hashinfo.ehash[st->bucket].twchain) {
                        if (tw->tw_family != st->family ||
                            !net_eq(twsk_net(tw), net)) {
                                continue;
                        }
                        rc = tw;
                        goto out;
                }
                spin_unlock_bh(lock);
                st->state = TCP_SEQ_STATE_ESTABLISHED;
        }
out:
        return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
        struct sock *sk = cur;
        struct inet_timewait_sock *tw;
        struct hlist_nulls_node *node;
        struct tcp_iter_state *st = seq->private;
        struct net *net = seq_file_net(seq);

        ++st->num;
        ++st->offset;

        if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
                tw = cur;
                tw = tw_next(tw);
get_tw:
                while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
                        tw = tw_next(tw);
                }
                if (tw) {
                        cur = tw;
                        goto out;
                }
                spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
                st->state = TCP_SEQ_STATE_ESTABLISHED;

                /* Look for next non empty bucket */
                st->offset = 0;
                while (++st->bucket <= tcp_hashinfo.ehash_mask &&
                                empty_bucket(st))
                        ;
                if (st->bucket > tcp_hashinfo.ehash_mask)
                        return NULL;

                spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
                sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
        } else
                sk = sk_nulls_next(sk);

        sk_nulls_for_each_from(sk, node) {
                if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
                        goto found;
        }

        st->state = TCP_SEQ_STATE_TIME_WAIT;
        tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
        goto get_tw;
found:
        cur = sk;
out:
        return cur;
}
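
/* Each ehash bucket is thus walked in two phases under a single bucket
 * lock: first the chain of established sockets, then the twchain of
 * TIME_WAIT sockets, with st->state recording which phase the iterator
 * is in so tcp4_seq_show() picks the right printer.
 */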

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
        struct tcp_iter_state *st = seq->private;
        void *rc;

        st->bucket = 0;
        rc = established_get_first(seq);

        while (rc && pos) {
                rc = established_get_next(seq, rc);
                --pos;
        }
        return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
        void *rc;
        struct tcp_iter_state *st = seq->private;

        st->state = TCP_SEQ_STATE_LISTENING;
        rc = listening_get_idx(seq, &pos);

        if (!rc) {
                st->state = TCP_SEQ_STATE_ESTABLISHED;
                rc = established_get_idx(seq, pos);
        }

        return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
        struct tcp_iter_state *st = seq->private;
        int offset = st->offset;
        int orig_num = st->num;
        void *rc = NULL;

        switch (st->state) {
        case TCP_SEQ_STATE_OPENREQ:
        case TCP_SEQ_STATE_LISTENING:
                if (st->bucket >= INET_LHTABLE_SIZE)
                        break;
                st->state = TCP_SEQ_STATE_LISTENING;
                rc = listening_get_next(seq, NULL);
                while (offset-- && rc)
                        rc = listening_get_next(seq, rc);
                if (rc)
                        break;
                st->bucket = 0;
                /* Fallthrough */
        case TCP_SEQ_STATE_ESTABLISHED:
        case TCP_SEQ_STATE_TIME_WAIT:
                st->state = TCP_SEQ_STATE_ESTABLISHED;
                if (st->bucket > tcp_hashinfo.ehash_mask)
                        break;
                rc = established_get_first(seq);
                while (offset-- && rc)
                        rc = established_get_next(seq, rc);
        }

        st->num = orig_num;

        return rc;
}
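
/* tcp_seek_last_pos() is purely an optimization: st->bucket and
 * st->offset remember where the previous read of the seq_file stopped,
 * so a sequential reader of /proc/net/tcp does not rescan every earlier
 * bucket on each chunk.  When the remembered position is unusable,
 * tcp_seq_start() below simply falls back to a full walk through
 * tcp_get_idx().
 */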

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct tcp_iter_state *st = seq->private;
        void *rc;

        if (*pos && *pos == st->last_pos) {
                rc = tcp_seek_last_pos(seq);
                if (rc)
                        goto out;
        }

        st->state = TCP_SEQ_STATE_LISTENING;
        st->num = 0;
        st->bucket = 0;
        st->offset = 0;
        rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
        st->last_pos = *pos;
        return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct tcp_iter_state *st = seq->private;
        void *rc = NULL;

        if (v == SEQ_START_TOKEN) {
                rc = tcp_get_idx(seq, 0);
                goto out;
        }

        switch (st->state) {
        case TCP_SEQ_STATE_OPENREQ:
        case TCP_SEQ_STATE_LISTENING:
                rc = listening_get_next(seq, v);
                if (!rc) {
                        st->state = TCP_SEQ_STATE_ESTABLISHED;
                        st->bucket = 0;
                        st->offset = 0;
                        rc = established_get_first(seq);
                }
                break;
        case TCP_SEQ_STATE_ESTABLISHED:
        case TCP_SEQ_STATE_TIME_WAIT:
                rc = established_get_next(seq, v);
                break;
        }
out:
        ++*pos;
        st->last_pos = *pos;
        return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
        struct tcp_iter_state *st = seq->private;

        switch (st->state) {
        case TCP_SEQ_STATE_OPENREQ:
                if (v) {
                        struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
                        read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                }
                /* Fallthrough */
        case TCP_SEQ_STATE_LISTENING:
                if (v != SEQ_START_TOKEN)
                        spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
                break;
        case TCP_SEQ_STATE_TIME_WAIT:
        case TCP_SEQ_STATE_ESTABLISHED:
                if (v)
                        spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
                break;
        }
}
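
/* Note the fallthrough from OPENREQ to LISTENING above: stopping while
 * inside a listener's request table must drop both syn_wait_lock and
 * the listening-hash bucket lock, since both were taken on the way in
 * by listening_get_next().
 */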

int tcp_seq_open(struct inode *inode, struct file *file)
{
        struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
        struct tcp_iter_state *s;
        int err;

        err = seq_open_net(inode, file, &afinfo->seq_ops,
                           sizeof(struct tcp_iter_state));
        if (err < 0)
                return err;

        s = ((struct seq_file *)file->private_data)->private;
        s->family = afinfo->family;
        s->last_pos = 0;
        return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
        int rc = 0;
        struct proc_dir_entry *p;

        afinfo->seq_ops.start = tcp_seq_start;
        afinfo->seq_ops.next  = tcp_seq_next;
        afinfo->seq_ops.stop  = tcp_seq_stop;

        p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
                             afinfo->seq_fops, afinfo);
        if (!p)
                rc = -ENOMEM;
        return rc;
}
EXPORT_SYMBOL(tcp_proc_register);
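
/* Usage sketch (this mirrors how tcp4_seq_afinfo is wired up below;
 * "my_afinfo" is a made-up name for illustration).  Callers provide
 * only ->show; tcp_proc_register() plugs in the shared iterators:
 *
 *	static struct tcp_seq_afinfo my_afinfo = {
 *		.name     = "tcp",
 *		.family   = AF_INET,
 *		.seq_fops = &tcp_afinfo_seq_fops,
 *		.seq_ops  = { .show = tcp4_seq_show },
 *	};
 *	err = tcp_proc_register(net, &my_afinfo);
 */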

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
        remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);

static void get_openreq4(const struct sock *sk, const struct request_sock *req,
                         struct seq_file *f, int i, kuid_t uid, int *len)
{
        const struct inet_request_sock *ireq = inet_rsk(req);
        long delta = req->expires - jiffies;

        seq_printf(f, "%4d: %08X:%04X %08X:%04X"
                " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
                i,
                ireq->loc_addr,
                ntohs(inet_sk(sk)->inet_sport),
                ireq->rmt_addr,
                ntohs(ireq->rmt_port),
                TCP_SYN_RECV,
                0, 0, /* could print option size, but that is af dependent. */
                1,    /* timers active (only the expire timer) */
                jiffies_delta_to_clock_t(delta),
                req->num_timeout,
                from_kuid_munged(seq_user_ns(f), uid),
                0,  /* non standard timer */
                0, /* open_requests have no inode */
                atomic_read(&sk->sk_refcnt),
                req,
                len);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
        int timer_active;
        unsigned long timer_expires;
        const struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        const struct inet_sock *inet = inet_sk(sk);
        struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
        __be32 dest = inet->inet_daddr;
        __be32 src = inet->inet_rcv_saddr;
        __u16 destp = ntohs(inet->inet_dport);
        __u16 srcp = ntohs(inet->inet_sport);
        int rx_queue;

        if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
            icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
            icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
                timer_active = 1;
                timer_expires = icsk->icsk_timeout;
        } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
                timer_active = 4;
                timer_expires = icsk->icsk_timeout;
        } else if (timer_pending(&sk->sk_timer)) {
                timer_active = 2;
                timer_expires = sk->sk_timer.expires;
        } else {
                timer_active = 0;
                timer_expires = jiffies;
        }

        if (sk->sk_state == TCP_LISTEN)
                rx_queue = sk->sk_ack_backlog;
        else
                /*
                 * Because we don't lock the socket, we might find a
                 * transient negative value.
                 */
                rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

        seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
                        "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
                i, src, srcp, dest, destp, sk->sk_state,
                tp->write_seq - tp->snd_una,
                rx_queue,
                timer_active,
                jiffies_delta_to_clock_t(timer_expires - jiffies),
                icsk->icsk_retransmits,
                from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
                icsk->icsk_probes_out,
                sock_i_ino(sk),
                atomic_read(&sk->sk_refcnt), sk,
                jiffies_to_clock_t(icsk->icsk_rto),
                jiffies_to_clock_t(icsk->icsk_ack.ato),
                (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
                tp->snd_cwnd,
                sk->sk_state == TCP_LISTEN ?
                        (fastopenq ? fastopenq->max_qlen : 0) :
                        (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
                len);
}

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
                               struct seq_file *f, int i, int *len)
{
        __be32 dest, src;
        __u16 destp, srcp;
        long delta = tw->tw_ttd - jiffies;

        dest  = tw->tw_daddr;
        src   = tw->tw_rcv_saddr;
        destp = ntohs(tw->tw_dport);
        srcp  = ntohs(tw->tw_sport);

        seq_printf(f, "%4d: %08X:%04X %08X:%04X"
                " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
                i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
                3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
                atomic_read(&tw->tw_refcnt), tw, len);
}
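
/* A line produced by the printers above looks roughly like this (all
 * values made up for illustration; addresses and ports are raw hex):
 *
 *   0: 0100007F:0016 00000000:0000 0A 00000000:00000000 00:00000000
 *      00000000     0        0 12345 1 ffff88003d3af3c0 ...
 *
 * i.e. a listening socket (state 0x0A) on 127.0.0.1:22, followed by
 * uid, timeout, inode, refcount and the socket pointer.
 */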

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
        struct tcp_iter_state *st;
        int len;

        if (v == SEQ_START_TOKEN) {
                seq_printf(seq, "%-*s\n", TMPSZ - 1,
                           "  sl  local_address rem_address   st tx_queue "
                           "rx_queue tr tm->when retrnsmt   uid  timeout "
                           "inode");
                goto out;
        }
        st = seq->private;

        switch (st->state) {
        case TCP_SEQ_STATE_LISTENING:
        case TCP_SEQ_STATE_ESTABLISHED:
                get_tcp4_sock(v, seq, st->num, &len);
                break;
        case TCP_SEQ_STATE_OPENREQ:
                get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
                break;
        case TCP_SEQ_STATE_TIME_WAIT:
                get_timewait4_sock(v, seq, st->num, &len);
                break;
        }
        seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
        return 0;
}

static const struct file_operations tcp_afinfo_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = tcp_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
        .name     = "tcp",
        .family   = AF_INET,
        .seq_fops = &tcp_afinfo_seq_fops,
        .seq_ops  = {
                .show = tcp4_seq_show,
        },
};
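
/* Only ->show is filled in here; the start/next/stop iterators are
 * installed by tcp_proc_register() when tcp4_proc_init_net() runs.
 */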

static int __net_init tcp4_proc_init_net(struct net *net)
{
        return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
        tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
        .init = tcp4_proc_init_net,
        .exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
        return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
        unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
        const struct iphdr *iph = skb_gro_network_header(skb);
        __wsum wsum;
        __sum16 sum;

        switch (skb->ip_summed) {
        case CHECKSUM_COMPLETE:
                if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
                                  skb->csum)) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        break;
                }
flush:
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;

        case CHECKSUM_NONE:
                wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
                                          skb_gro_len(skb), IPPROTO_TCP, 0);
                sum = csum_fold(skb_checksum(skb,
                                             skb_gro_offset(skb),
                                             skb_gro_len(skb),
                                             wsum));
                if (sum)
                        goto flush;

                skb->ip_summed = CHECKSUM_UNNECESSARY;
                break;
        }

        return tcp_gro_receive(head, skb);
}
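
/* Checksum policy for GRO: with CHECKSUM_COMPLETE the device already
 * summed the payload, so folding it against the pseudo-header is enough
 * to validate; with CHECKSUM_NONE the sum is recomputed in software
 * over the GRO region.  Any mismatch sets ->flush so the segment is
 * delivered unmerged instead of contaminating an aggregate.
 */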

int tcp4_gro_complete(struct sk_buff *skb)
{
        const struct iphdr *iph = ip_hdr(skb);
        struct tcphdr *th = tcp_hdr(skb);

        th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
                                  iph->saddr, iph->daddr, 0);
        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

        return tcp_gro_complete(skb);
}
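
/* The merged super-packet may later be resegmented by GSO (e.g. when
 * forwarded), so only the inverted pseudo-header sum is seeded into
 * th->check here; the per-segment checksums are finished later,
 * possibly by hardware.
 */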

struct proto tcp_prot = {
        .name                   = "TCP",
        .owner                  = THIS_MODULE,
        .close                  = tcp_close,
        .connect                = tcp_v4_connect,
        .disconnect             = tcp_disconnect,
        .accept                 = inet_csk_accept,
        .ioctl                  = tcp_ioctl,
        .init                   = tcp_v4_init_sock,
        .destroy                = tcp_v4_destroy_sock,
        .shutdown               = tcp_shutdown,
        .setsockopt             = tcp_setsockopt,
        .getsockopt             = tcp_getsockopt,
        .recvmsg                = tcp_recvmsg,
        .sendmsg                = tcp_sendmsg,
        .sendpage               = tcp_sendpage,
        .backlog_rcv            = tcp_v4_do_rcv,
        .release_cb             = tcp_release_cb,
        .hash                   = inet_hash,
        .unhash                 = inet_unhash,
        .get_port               = inet_csk_get_port,
        .enter_memory_pressure  = tcp_enter_memory_pressure,
        .sockets_allocated      = &tcp_sockets_allocated,
        .orphan_count           = &tcp_orphan_count,
        .memory_allocated       = &tcp_memory_allocated,
        .memory_pressure        = &tcp_memory_pressure,
        .sysctl_wmem            = sysctl_tcp_wmem,
        .sysctl_rmem            = sysctl_tcp_rmem,
        .max_header             = MAX_TCP_HEADER,
        .obj_size               = sizeof(struct tcp_sock),
        .slab_flags             = SLAB_DESTROY_BY_RCU,
        .twsk_prot              = &tcp_timewait_sock_ops,
        .rsk_prot               = &tcp_request_sock_ops,
        .h.hashinfo             = &tcp_hashinfo,
        .no_autobind            = true,
#ifdef CONFIG_COMPAT
        .compat_setsockopt      = compat_tcp_setsockopt,
        .compat_getsockopt      = compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
        .init_cgroup            = tcp_init_cgroup,
        .destroy_cgroup         = tcp_destroy_cgroup,
        .proto_cgroup           = tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);
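
/* This table is what the generic socket layer dispatches through for
 * AF_INET/SOCK_STREAM sockets: e.g. connect(2) reaches tcp_v4_connect()
 * via inet_stream_connect(), and close(2) eventually lands in
 * tcp_close() and then tcp_v4_destroy_sock() above.
 */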

static int __net_init tcp_sk_init(struct net *net)
{
        net->ipv4.sysctl_tcp_ecn = 2;
        return 0;
}

static void __net_exit tcp_sk_exit(struct net *net)
{
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
        inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
        .init       = tcp_sk_init,
        .exit       = tcp_sk_exit,
        .exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
        inet_hashinfo_init(&tcp_hashinfo);
        if (register_pernet_subsys(&tcp_sk_ops))
                panic("Failed to create the TCP control socket.\n");
}
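
/* Called early from inet_init(); panicking on failure is reasonable
 * here, since a kernel whose TCP hash tables cannot be set up has no
 * useful way to continue.
 */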