/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *		Juan Jose Ciarlante:	ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *		Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *		Andi Kleen		:	Fix new listen.
 *		Andi Kleen		:	Fix accept error reporting.
 *		YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option,
 *		Alexey Kuznetsov		which allows both IPv4 and IPv6 sockets
 *						to bind a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt
#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
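/* Worked example (illustrative note, not from the original source): if the
 * old incarnation's TIME-WAIT bucket recorded tw_snd_nxt == 1000, the reused
 * connection starts at write_seq = 1000 + 65535 + 2 = 66537. The +65535+2
 * places the new sequence space beyond anything the old connection could
 * still legitimately have in flight (a full 64K window plus room for
 * SYN/FIN), so stray old segments cannot be mistaken for new data even when
 * PAWS timestamps are unavailable.
 */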
static int tcp_repair_connect(struct sock *sk)
{
	tcp_connect_init(sk);
	tcp_finish_connect(sk, NULL);

	return 0;
}
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk, true);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}

	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	if (likely(!tp->repair))
		err = tcp_connect(sk);
	else
		err = tcp_repair_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
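/* Call-path sketch (orientation note, not part of the original file): a
 * user-space connect(2) on a TCP socket reaches this function roughly as
 *
 *	connect(fd, ...) -> inet_stream_connect() -> sk->sk_prot->connect
 *		== tcp_v4_connect()
 *
 * so tcp_v4_connect() runs with the socket lock held: it resolves the
 * route, picks a source port via inet_hash_connect(), and finally sends
 * the SYN through tcp_connect().
 */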
/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always <576 bytes so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection is not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
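/* Example (illustrative): suppose icsk_pmtu_cookie is 1500 and a router on
 * the path returns ICMP_FRAG_NEEDED with an MTU of 1400. The check above
 * sees 1500 > 1400, so tcp_sync_mss() shrinks the MSS to fit the new path
 * MTU and tcp_simple_retransmit() resends the too-big segments immediately,
 * instead of waiting for the retransmit timer to expire.
 */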
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition. If err < 0 then the socket should
 * be closed and the error returned to the user. If err > 0
 * it's just the icmp type << 8 | icmp code. After adjustment
 * header points to the first 8 bytes of the tcp header. We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now. */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, f.e., if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);
			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else { /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static void __tcp_v4_send_check(struct sk_buff *skb,
				__be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
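/* For reference (explanatory note): the IPv4 pseudo-header folded into the
 * checksum above (and fed to the MD5 code below via struct tcp4_pseudohdr)
 * is laid out as
 *
 *	saddr (4 bytes) | daddr (4 bytes) | zero pad (1) |
 *	IPPROTO_TCP (1) | TCP segment length (2)
 *
 * It is never transmitted; both ends reconstruct it from the IP header.
 */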
int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks:  why I NEVER use socket parameters (TOS, TTL etc.)
 *		       for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So we build the reply based only on parameters that
 *		arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest = th->source;
	rep.th.source = th->dest;
	rep.th.doff = sizeof(struct tcphdr) / 4;
	rep.th.rst = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose security here:
		 * the incoming packet is checked with the md5 hash of the
		 * found key; no RST is generated if the md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
					     &tcp_hashinfo, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When socket is gone, all binding information is lost.
	 * Routing might fail in this case. Using iif for oif to
	 * make sure we can deliver it.
	 */
	arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);

	net = dev_net(skb_dst(skb)->dev);
	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
 * outside socket context, is certainly ugly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest = th->source;
	rep.th.source = th->dest;
	rep.th.doff = arg.iov[0].iov_len / 4;
	rep.th.seq = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack = 1;
	rep.th.window = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos);

	inet_twsk_put(tw);
}
static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct request_values *rvp,
			      u16 queue_mapping,
			      bool nocache)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req, nocache)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, rvp);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}

static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
/*
 * Return true if a syncookie should be sent
 */
bool tcp_syn_flood_action(struct sock *sk,
			  const struct sk_buff *skb,
			  const char *proto)
{
	const char *msg = "Dropping request";
	bool want_cookie = false;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);
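/* Note (explanatory addition): whether cookies are actually sent is guarded
 * both by CONFIG_SYN_COOKIES at build time and by the runtime knob, e.g.
 *
 *	sysctl -w net.ipv4.tcp_syncookies=1
 *
 * With the sysctl off, this function only rate-limits the warning and the
 * caller drops the request.
 */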
/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
						  struct sk_buff *skb)
{
	const struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options_rcu *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(&dopt->opt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address. */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *pos;
	unsigned int size = sizeof(struct in_addr);
	struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);
struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
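/* Usage sketch (illustrative, not part of the original file): keys normally
 * arrive from user space via the TCP_MD5SIG socket option, handled by
 * tcp_v4_parse_md5_keys() below, roughly:
 *
 *	struct tcp_md5sig md5 = { ... };	// peer address + key material
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * which ends up in tcp_md5_do_add() (or tcp_md5_do_del() when the key is
 * empty).
 */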
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (hlist_empty(&md5sig->head))
		tcp_free_md5sig_pool();
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);
void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *pos, *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	if (!hlist_empty(&md5sig->head))
		tcp_free_md5sig_pool();
	hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
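/* Explanatory note: the hash input order above follows RFC 2385 — (1) the
 * TCP pseudo-header, (2) the TCP header (without options, checksum field
 * zeroed, see tcp_md5_hash_header()), (3) the segment payload, and (4) the
 * key itself, fed to MD5 in that exact sequence.
 */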
static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
}
#endif
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	bool want_cookie = false;

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and the peer is
	 * evidently a real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
		if (!want_cookie)
			goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_release;

		/* Secret recipe starts with IP addresses */
		*mess++ ^= (__force u32)daddr;
		*mess++ ^= (__force u32)saddr;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = false;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_release;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb);

	if (want_cookie) {
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		struct flowi4 fl4;

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit an alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, &fl4, req, want_cookie)) != NULL &&
		    fl4.daddr == saddr) {
			if (!tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies the last quarter of the
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	if (tcp_v4_send_synack(sk, dst, req,
			       (struct request_values *)&tmp_ext,
			       skb_get_queue_mapping(skb),
			       want_cookie) ||
	    want_cookie)
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
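/* Passive-open flow (orientation sketch): an incoming SYN lands in
 * tcp_v4_conn_request() above, which queues a request_sock and answers with
 * a SYN-ACK; the final ACK of the handshake is matched back to that request
 * by tcp_v4_hnd_req()/tcp_check_req() and promoted to a full socket in
 * tcp_v4_syn_recv_sock() below.
 */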
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	ireq = inet_rsk(req);
	newinet->inet_daddr = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr = ireq->loc_addr;
	inet_opt = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt = NULL;
	newinet->mc_index = inet_iif(skb);
	newinet->mc_ttl = ip_hdr(skb)->ttl;
	newinet->rcv_tos = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	tcp_clear_xmit_timers(newsk);
	tcp_cleanup_congestion_control(newsk);
	bh_unlock_sock(newsk);
	sock_put(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *	o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *	o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
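/* Note (explanatory addition): tcp_v4_do_rcv() is reached either directly
 * from tcp_v4_rcv() when no user holds the socket lock (possibly via the
 * prequeue), or later via sk_backlog_rcv() (.backlog_rcv = tcp_v4_do_rcv in
 * tcp_prot below) when the segment had to be parked on the backlog while a
 * user owned the socket.
 */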
void tcp_v4_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct net_device *dev;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, ip_hdrlen(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = (struct tcphdr *) ((char *)iph + ip_hdrlen(skb));

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	if (!pskb_may_pull(skb, ip_hdrlen(skb) + th->doff * 4))
		return;

	dev = skb->dev;
	sk = __inet_lookup_established(net, &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       dev->ifindex);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, 0);
			if (dst) {
				struct rtable *rt = (struct rtable *) dst;

				if (rt->rt_iif == dev->ifindex)
					skb_dst_set_noref(skb, dst);
			}
		}
	}
}
int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);
	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;
process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);
	if (sk_filter(sk, skb))
		goto discard_and_relse;
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);
	sock_put(sk);
	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	 = sizeof(struct tcp_timewait_sock),
	.twsk_unique	 = tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
EXPORT_SYMBOL(ipv4_specific);
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things are set to zero explicitly by the call to
 *	 sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/*
	 * If sendmsg cached page exists, toss it.
	 */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	/* TCP Cookie Transactions */
	if (tp->cookie_values != NULL) {
		kref_put(&tp->cookie_values->kref,
			 tcp_cookie_values_release);
		tp->cookie_values = NULL;
	}

	sk_sockets_allocated_dec(sk);
	sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
/*
 * Get the next listener socket following cur. If cur is NULL, get the first
 * socket starting from the bucket given in st->bucket; when st->bucket is
 * zero the very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid = sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state = TCP_SEQ_STATE_OPENREQ;
			st->sbucket = 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}
static inline bool empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}
/*
 * Get the first established socket starting from the bucket given in
 * st->bucket. If st->bucket is zero, the very first socket in the hash
 * is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for next non-empty bucket */
		st->offset = 0;
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
2201 struct tcp_iter_state *st = seq->private;
2203 st->state = TCP_SEQ_STATE_LISTENING;
2204 rc = listening_get_idx(seq, &pos);
2207 st->state = TCP_SEQ_STATE_ESTABLISHED;
2208 rc = established_get_idx(seq, pos);
2214 static void *tcp_seek_last_pos(struct seq_file *seq)
2216 struct tcp_iter_state *st = seq->private;
2217 int offset = st->offset;
2218 int orig_num = st->num;
2221 switch (st->state) {
2222 case TCP_SEQ_STATE_OPENREQ:
2223 case TCP_SEQ_STATE_LISTENING:
2224 if (st->bucket >= INET_LHTABLE_SIZE)
2226 st->state = TCP_SEQ_STATE_LISTENING;
2227 rc = listening_get_next(seq, NULL);
2228 while (offset-- && rc)
2229 rc = listening_get_next(seq, rc);
2234 case TCP_SEQ_STATE_ESTABLISHED:
2235 case TCP_SEQ_STATE_TIME_WAIT:
2236 st->state = TCP_SEQ_STATE_ESTABLISHED;
2237 if (st->bucket > tcp_hashinfo.ehash_mask)
2239 rc = established_get_first(seq);
2240 while (offset-- && rc)
2241 rc = established_get_next(seq, rc);
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family = afinfo->family;
	s->last_pos = 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start	= tcp_seq_start;
	afinfo->seq_ops.next	= tcp_seq_next;
	afinfo->seq_ops.stop	= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}
EXPORT_SYMBOL(tcp_proc_unregister);
static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, int uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active = 1;
		timer_expires = icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active = 4;
		timer_expires = icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active = 2;
		timer_expires = sk->sk_timer.expires;
	} else {
		timer_active = 0;
		timer_expires = jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * Because we don't lock the socket, we might find a
		 * transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sk),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
		len);
}
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}
#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}
static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};
static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}
int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);
static int __net_init tcp_sk_init(struct net *net)
{
	return 0;
}

static void __net_exit tcp_sk_exit(struct net *net)
{
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}