2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol (TCP).
8 * IPv4 specific functions
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
16 * See tcp.c for author information
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
26 * David S. Miller : New socket lookup architecture.
27 * This code is dedicated to John Dyson.
28 * David S. Miller : Change semantics of established hash,
29 * half is devoted to TIME_WAIT sockets
30 * and the rest go in the other half.
31 * Andi Kleen : Add support for syncookies and fixed
32 * some bugs: ip options weren't passed to
33 * the TCP layer, missed a check for an ACK bit.
35 * Andi Kleen : Implemented fast path mtu discovery.
36 * Fixed many serious bugs in the
37 * request_sock handling and moved
38 * most of it into the af independent code.
39 * Added tail drop and some other bugfixes.
40 * Added new listen semantics.
41 * Mike McLagan : Routing by source
42 * Juan Jose Ciarlante: ip_dynaddr bits
43 * Andi Kleen: various fixes.
44 * Vitaly E. Lavrov : Transparent proxy revived after a year's coma.
46 * Andi Kleen : Fix new listen.
47 * Andi Kleen : Fix accept error reporting.
48 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
49 * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
50 * to a single port at the same time.
54 #include <linux/bottom_half.h>
55 #include <linux/types.h>
56 #include <linux/fcntl.h>
57 #include <linux/module.h>
58 #include <linux/random.h>
59 #include <linux/cache.h>
60 #include <linux/jhash.h>
61 #include <linux/init.h>
62 #include <linux/times.h>
63 #include <linux/slab.h>
65 #include <net/net_namespace.h>
67 #include <net/inet_hashtables.h>
69 #include <net/transp_v6.h>
71 #include <net/inet_common.h>
72 #include <net/timewait_sock.h>
74 #include <net/netdma.h>
76 #include <linux/inet.h>
77 #include <linux/ipv6.h>
78 #include <linux/stddef.h>
79 #include <linux/proc_fs.h>
80 #include <linux/seq_file.h>
82 #include <linux/crypto.h>
83 #include <linux/scatterlist.h>
85 int sysctl_tcp_tw_reuse __read_mostly;
86 int sysctl_tcp_low_latency __read_mostly;
87 EXPORT_SYMBOL(sysctl_tcp_low_latency);
90 #ifdef CONFIG_TCP_MD5SIG
91 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
93 static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
94 __be32 daddr, __be32 saddr, struct tcphdr *th);
97 struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
103 struct inet_hashinfo tcp_hashinfo;
104 EXPORT_SYMBOL(tcp_hashinfo);
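/* Pick the initial sequence number for a connection from this peer.
 * secure_tcp_sequence_number() mixes the address/port 4-tuple with a
 * secret and a clock so that ISNs are hard for an off-path attacker to
 * predict (in the spirit of RFC 1948).
 */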
106 static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
108 return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
111 tcp_hdr(skb)->source);
114 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
116 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
117 struct tcp_sock *tp = tcp_sk(sk);
119 /* With PAWS, it is safe from the viewpoint
120 of data integrity. Even without PAWS it is safe provided sequence
121 spaces do not overlap, i.e. at data rates <= 80Mbit/sec.
123 Actually, the idea is close to VJ's: only the timestamp cache is
124 held not per host but per port pair, and the TW bucket is used as the state holder.
127 If TW bucket has been already destroyed we fall back to VJ's scheme
128 and use initial timestamp retrieved from peer table.
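/* Note on the offset below: stepping write_seq to tw_snd_nxt + 65535 + 2
 * places the new connection's first sequence number beyond anything the
 * old incarnation could still legitimately have in flight, even with a
 * maximally sized (64K) unscaled window.
 */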
130 if (tcptw->tw_ts_recent_stamp &&
131 (twp == NULL || (sysctl_tcp_tw_reuse &&
132 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
133 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
134 if (tp->write_seq == 0)
136 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
137 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
144 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
146 /* This will initiate an outgoing connection. */
147 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
149 struct inet_sock *inet = inet_sk(sk);
150 struct tcp_sock *tp = tcp_sk(sk);
151 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
153 __be32 daddr, nexthop;
157 if (addr_len < sizeof(struct sockaddr_in))
160 if (usin->sin_family != AF_INET)
161 return -EAFNOSUPPORT;
163 nexthop = daddr = usin->sin_addr.s_addr;
164 if (inet->opt && inet->opt->srr) {
167 nexthop = inet->opt->faddr;
170 tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr,
171 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
173 inet->inet_sport, usin->sin_port, sk, 1);
175 if (tmp == -ENETUNREACH)
176 IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
180 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
185 if (!inet->opt || !inet->opt->srr)
188 if (!inet->inet_saddr)
189 inet->inet_saddr = rt->rt_src;
190 inet->inet_rcv_saddr = inet->inet_saddr;
192 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
193 /* Reset inherited state */
194 tp->rx_opt.ts_recent = 0;
195 tp->rx_opt.ts_recent_stamp = 0;
199 if (tcp_death_row.sysctl_tw_recycle &&
200 !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
201 struct inet_peer *peer = rt_get_peer(rt);
203 * VJ's idea. We save last timestamp seen from
204 * the destination in peer table, when entering state
205 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
206 * when trying new connection.
209 inet_peer_refcheck(peer);
210 if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
211 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
212 tp->rx_opt.ts_recent = peer->tcp_ts;
217 inet->inet_dport = usin->sin_port;
218 inet->inet_daddr = daddr;
220 inet_csk(sk)->icsk_ext_hdr_len = 0;
222 inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;
224 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
226 /* Socket identity is still unknown (sport may be zero).
227 * However we set the state to SYN-SENT and, without releasing the socket
228 * lock, select a source port, enter ourselves into the hash tables and
229 * complete initialization after this.
231 tcp_set_state(sk, TCP_SYN_SENT);
232 err = inet_hash_connect(&tcp_death_row, sk);
236 err = ip_route_newports(&rt, IPPROTO_TCP,
237 inet->inet_sport, inet->inet_dport, sk);
241 /* OK, now commit destination to socket. */
242 sk->sk_gso_type = SKB_GSO_TCPV4;
243 sk_setup_caps(sk, &rt->dst);
246 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
251 inet->inet_id = tp->write_seq ^ jiffies;
253 err = tcp_connect(sk);
262 * This unhashes the socket and releases the local port,
265 tcp_set_state(sk, TCP_CLOSE);
267 sk->sk_route_caps = 0;
268 inet->inet_dport = 0;
271 EXPORT_SYMBOL(tcp_v4_connect);
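/* A minimal userspace sketch of how this path is reached (an
 * illustration, assuming a reachable peer; not part of this file):
 * connect(2) on an AF_INET/SOCK_STREAM socket ends up in
 * tcp_v4_connect() via inet_stream_connect().
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in sin = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *	inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
 *	connect(fd, (struct sockaddr *)&sin, sizeof(sin));
 */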
274 * This routine does path mtu discovery as defined in RFC1191.
276 static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
278 struct dst_entry *dst;
279 struct inet_sock *inet = inet_sk(sk);
281 /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
282 * sent out by Linux are always < 576 bytes, so they should go through unfragmented).
285 if (sk->sk_state == TCP_LISTEN)
288 /* We don't check in the dst entry if pmtu discovery is forbidden
289 * on this route. We just assume that no packet-too-big packets
290 * are sent back when pmtu discovery is not active.
291 * There is a small race when the user changes this flag in the
292 * route, but I think that's acceptable.
294 if ((dst = __sk_dst_check(sk, 0)) == NULL)
297 dst->ops->update_pmtu(dst, mtu);
299 /* Something is about to go wrong... Remember the soft error
300 * in case this connection is not able to recover.
302 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
303 sk->sk_err_soft = EMSGSIZE;
307 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
308 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
309 tcp_sync_mss(sk, mtu);
311 /* Resend the TCP packet because it's
312 * clear that the old packet has been
313 * dropped. This is the new "fast" path mtu discovery.
316 tcp_simple_retransmit(sk);
317 } /* else let the usual retransmit timer handle it */
321 * This routine is called by the ICMP module when it gets some
322 * sort of error condition. If err < 0 then the socket should
323 * be closed and the error returned to the user. If err > 0
324 * it's just the icmp type << 8 | icmp code. After adjustment,
325 * the header points to the first 8 bytes of the tcp header. We need
326 * to find the appropriate port.
328 * The locking strategy used here is very "optimistic". When
329 * someone else accesses the socket the ICMP is just dropped
330 * and for some paths there is no check at all.
331 * A more general error queue to queue errors for later handling
332 * is probably better.
336 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
338 struct iphdr *iph = (struct iphdr *)icmp_skb->data;
339 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
340 struct inet_connection_sock *icsk;
342 struct inet_sock *inet;
343 const int type = icmp_hdr(icmp_skb)->type;
344 const int code = icmp_hdr(icmp_skb)->code;
350 struct net *net = dev_net(icmp_skb->dev);
352 if (icmp_skb->len < (iph->ihl << 2) + 8) {
353 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
357 sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
358 iph->saddr, th->source, inet_iif(icmp_skb));
360 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
363 if (sk->sk_state == TCP_TIME_WAIT) {
364 inet_twsk_put(inet_twsk(sk));
369 /* If too many ICMPs get dropped on busy
370 * servers this needs to be solved differently.
372 if (sock_owned_by_user(sk))
373 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
375 if (sk->sk_state == TCP_CLOSE)
378 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
379 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
385 seq = ntohl(th->seq);
386 if (sk->sk_state != TCP_LISTEN &&
387 !between(seq, tp->snd_una, tp->snd_nxt)) {
388 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
393 case ICMP_SOURCE_QUENCH:
394 /* Just silently ignore these. */
396 case ICMP_PARAMETERPROB:
399 case ICMP_DEST_UNREACH:
400 if (code > NR_ICMP_UNREACH)
403 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
404 if (!sock_owned_by_user(sk))
405 do_pmtu_discovery(sk, iph, info);
409 err = icmp_err_convert[code].errno;
410 /* check if icmp_skb allows revert of backoff
411 * (see draft-zimmermann-tcp-lcd) */
412 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
414 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
418 if (sock_owned_by_user(sk))
421 icsk->icsk_backoff--;
422 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
426 skb = tcp_write_queue_head(sk);
429 remaining = icsk->icsk_rto - min(icsk->icsk_rto,
430 tcp_time_stamp - TCP_SKB_CB(skb)->when);
433 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
434 remaining, TCP_RTO_MAX);
436 /* RTO revert clocked out the retransmission.
437 * Will retransmit now. */
438 tcp_retransmit_timer(sk);
442 case ICMP_TIME_EXCEEDED:
449 switch (sk->sk_state) {
450 struct request_sock *req, **prev;
452 if (sock_owned_by_user(sk))
455 req = inet_csk_search_req(sk, &prev, th->dest,
456 iph->daddr, iph->saddr);
460 /* ICMPs are not backlogged, hence we cannot get
461 an established socket here.
465 if (seq != tcp_rsk(req)->snt_isn) {
466 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
471 * Still in SYN_RECV, just remove it silently.
472 * There is no good way to pass the error to the newly
473 * created socket, and POSIX does not want network
474 * errors returned from accept().
476 inet_csk_reqsk_queue_drop(sk, req, prev);
480 case TCP_SYN_RECV: /* Cannot normally happen.
481 It can, e.g., if SYNs crossed.
483 if (!sock_owned_by_user(sk)) {
486 sk->sk_error_report(sk);
490 sk->sk_err_soft = err;
495 /* If we've already connected we will keep trying
496 * until we time out, or the user gives up.
498 * rfc1122 4.2.3.9 allows considering only PROTO_UNREACH and
499 * PORT_UNREACH as hard errors (well, FRAG_FAILED too,
500 * but it is obsoleted by pmtu discovery).
502 * Note that on the modern internet, where routing is unreliable
503 * and broken firewalls sit in every dark corner sending random
504 * errors ordered by their masters, even these two messages have finally lost
505 * their original sense (even Linux sends invalid PORT_UNREACHs)
507 * Now we are in compliance with RFCs.
512 if (!sock_owned_by_user(sk) && inet->recverr) {
514 sk->sk_error_report(sk);
515 } else { /* Only an error on timeout */
516 sk->sk_err_soft = err;
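/* Fill in the TCP checksum of an outgoing segment.  With CHECKSUM_PARTIAL
 * only the pseudo-header sum is written here, and csum_start/csum_offset
 * tell the device (or the GSO path) where to finish the job; otherwise
 * the full checksum is computed in software.
 */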
524 static void __tcp_v4_send_check(struct sk_buff *skb,
525 __be32 saddr, __be32 daddr)
527 struct tcphdr *th = tcp_hdr(skb);
529 if (skb->ip_summed == CHECKSUM_PARTIAL) {
530 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
531 skb->csum_start = skb_transport_header(skb) - skb->head;
532 skb->csum_offset = offsetof(struct tcphdr, check);
534 th->check = tcp_v4_check(skb->len, saddr, daddr,
541 /* This routine computes an IPv4 TCP checksum. */
542 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
544 struct inet_sock *inet = inet_sk(sk);
546 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
548 EXPORT_SYMBOL(tcp_v4_send_check);
550 int tcp_v4_gso_send_check(struct sk_buff *skb)
552 const struct iphdr *iph;
555 if (!pskb_may_pull(skb, sizeof(*th)))
562 skb->ip_summed = CHECKSUM_PARTIAL;
563 __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
568 * This routine will send an RST to the other tcp.
570 * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)?
572 * Answer: if a packet caused the RST, it is not for a socket
573 * existing in our system; if it is matched to a socket,
574 * it is just a duplicate segment or a bug in the other side's TCP.
575 * So we build the reply based only on the parameters
576 * that arrived with the segment.
577 * Exception: precedence violation. We do not implement it in any case.
580 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
582 struct tcphdr *th = tcp_hdr(skb);
585 #ifdef CONFIG_TCP_MD5SIG
586 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
589 struct ip_reply_arg arg;
590 #ifdef CONFIG_TCP_MD5SIG
591 struct tcp_md5sig_key *key;
595 /* Never send a reset in response to a reset. */
599 if (skb_rtable(skb)->rt_type != RTN_LOCAL)
602 /* Swap the send and the receive. */
603 memset(&rep, 0, sizeof(rep));
604 rep.th.dest = th->source;
605 rep.th.source = th->dest;
606 rep.th.doff = sizeof(struct tcphdr) / 4;
610 rep.th.seq = th->ack_seq;
613 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
614 skb->len - (th->doff << 2));
617 memset(&arg, 0, sizeof(arg));
618 arg.iov[0].iov_base = (unsigned char *)&rep;
619 arg.iov[0].iov_len = sizeof(rep.th);
621 #ifdef CONFIG_TCP_MD5SIG
622 key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
624 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
626 (TCPOPT_MD5SIG << 8) |
628 /* Update length and the length the header thinks exists */
629 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
630 rep.th.doff = arg.iov[0].iov_len / 4;
632 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
633 key, ip_hdr(skb)->saddr,
634 ip_hdr(skb)->daddr, &rep.th);
637 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
638 ip_hdr(skb)->saddr, /* XXX */
639 arg.iov[0].iov_len, IPPROTO_TCP, 0);
640 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
641 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
643 net = dev_net(skb_dst(skb)->dev);
644 ip_send_reply(net->ipv4.tcp_sock, skb,
645 &arg, arg.iov[0].iov_len);
647 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
648 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
651 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
652 outside socket context, is certainly ugly. What can I do?
655 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
656 u32 win, u32 ts, int oif,
657 struct tcp_md5sig_key *key,
660 struct tcphdr *th = tcp_hdr(skb);
663 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
664 #ifdef CONFIG_TCP_MD5SIG
665 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
669 struct ip_reply_arg arg;
670 struct net *net = dev_net(skb_dst(skb)->dev);
672 memset(&rep.th, 0, sizeof(struct tcphdr));
673 memset(&arg, 0, sizeof(arg));
675 arg.iov[0].iov_base = (unsigned char *)&rep;
676 arg.iov[0].iov_len = sizeof(rep.th);
678 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
679 (TCPOPT_TIMESTAMP << 8) |
681 rep.opt[1] = htonl(tcp_time_stamp);
682 rep.opt[2] = htonl(ts);
683 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
686 /* Swap the send and the receive. */
687 rep.th.dest = th->source;
688 rep.th.source = th->dest;
689 rep.th.doff = arg.iov[0].iov_len / 4;
690 rep.th.seq = htonl(seq);
691 rep.th.ack_seq = htonl(ack);
693 rep.th.window = htons(win);
695 #ifdef CONFIG_TCP_MD5SIG
697 int offset = (ts) ? 3 : 0;
699 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
701 (TCPOPT_MD5SIG << 8) |
703 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
704 rep.th.doff = arg.iov[0].iov_len/4;
706 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
707 key, ip_hdr(skb)->saddr,
708 ip_hdr(skb)->daddr, &rep.th);
711 arg.flags = reply_flags;
712 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
713 ip_hdr(skb)->saddr, /* XXX */
714 arg.iov[0].iov_len, IPPROTO_TCP, 0);
715 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
717 arg.bound_dev_if = oif;
719 ip_send_reply(net->ipv4.tcp_sock, skb,
720 &arg, arg.iov[0].iov_len);
722 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
725 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
727 struct inet_timewait_sock *tw = inet_twsk(sk);
728 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
730 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
731 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
734 tcp_twsk_md5_key(tcptw),
735 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
741 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
742 struct request_sock *req)
744 tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
745 tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
748 tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
749 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
753 * Send a SYN-ACK after having received a SYN.
754 * This still operates on a request_sock only, not on a big
757 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
758 struct request_sock *req,
759 struct request_values *rvp)
761 const struct inet_request_sock *ireq = inet_rsk(req);
763 struct sk_buff * skb;
765 /* First, grab a route. */
766 if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
769 skb = tcp_make_synack(sk, dst, req, rvp);
772 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
774 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
777 err = net_xmit_eval(err);
784 static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
785 struct request_values *rvp)
787 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
788 return tcp_v4_send_synack(sk, NULL, req, rvp);
792 * IPv4 request_sock destructor.
794 static void tcp_v4_reqsk_destructor(struct request_sock *req)
796 kfree(inet_rsk(req)->opt);
799 static void syn_flood_warning(const struct sk_buff *skb)
803 #ifdef CONFIG_SYN_COOKIES
804 if (sysctl_tcp_syncookies)
805 msg = "Sending cookies";
808 msg = "Dropping request";
810 pr_info("TCP: Possible SYN flooding on port %d. %s.\n",
811 ntohs(tcp_hdr(skb)->dest), msg);
815 * Save and compile IPv4 options into the request_sock if needed.
817 static struct ip_options *tcp_v4_save_options(struct sock *sk,
820 struct ip_options *opt = &(IPCB(skb)->opt);
821 struct ip_options *dopt = NULL;
823 if (opt && opt->optlen) {
824 int opt_size = optlength(opt);
825 dopt = kmalloc(opt_size, GFP_ATOMIC);
827 if (ip_options_echo(dopt, skb)) {
836 #ifdef CONFIG_TCP_MD5SIG
838 * RFC2385 MD5 checksumming requires a mapping of
839 * IP address->MD5 Key.
840 * We need to maintain these in the sk structure.
843 /* Find the Key structure for an address. */
844 static struct tcp_md5sig_key *
845 tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
847 struct tcp_sock *tp = tcp_sk(sk);
850 if (!tp->md5sig_info || !tp->md5sig_info->entries4)
852 for (i = 0; i < tp->md5sig_info->entries4; i++) {
853 if (tp->md5sig_info->keys4[i].addr == addr)
854 return &tp->md5sig_info->keys4[i].base;
859 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
860 struct sock *addr_sk)
862 return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr);
864 EXPORT_SYMBOL(tcp_v4_md5_lookup);
866 static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
867 struct request_sock *req)
869 return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
872 /* This can be called on a newly created socket, from other files */
873 int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
874 u8 *newkey, u8 newkeylen)
876 /* Add Key to the list */
877 struct tcp_md5sig_key *key;
878 struct tcp_sock *tp = tcp_sk(sk);
879 struct tcp4_md5sig_key *keys;
881 key = tcp_v4_md5_do_lookup(sk, addr);
883 /* Pre-existing entry - just update that one. */
886 key->keylen = newkeylen;
888 struct tcp_md5sig_info *md5sig;
890 if (!tp->md5sig_info) {
891 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
893 if (!tp->md5sig_info) {
897 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
899 if (tcp_alloc_md5sig_pool(sk) == NULL) {
903 md5sig = tp->md5sig_info;
905 if (md5sig->alloced4 == md5sig->entries4) {
906 keys = kmalloc((sizeof(*keys) *
907 (md5sig->entries4 + 1)), GFP_ATOMIC);
910 tcp_free_md5sig_pool();
914 if (md5sig->entries4)
915 memcpy(keys, md5sig->keys4,
916 sizeof(*keys) * md5sig->entries4);
918 /* Free old key list, and reference new one */
919 kfree(md5sig->keys4);
920 md5sig->keys4 = keys;
924 md5sig->keys4[md5sig->entries4 - 1].addr = addr;
925 md5sig->keys4[md5sig->entries4 - 1].base.key = newkey;
926 md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
930 EXPORT_SYMBOL(tcp_v4_md5_do_add);
932 static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
933 u8 *newkey, u8 newkeylen)
935 return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr,
939 int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
941 struct tcp_sock *tp = tcp_sk(sk);
944 for (i = 0; i < tp->md5sig_info->entries4; i++) {
945 if (tp->md5sig_info->keys4[i].addr == addr) {
947 kfree(tp->md5sig_info->keys4[i].base.key);
948 tp->md5sig_info->entries4--;
950 if (tp->md5sig_info->entries4 == 0) {
951 kfree(tp->md5sig_info->keys4);
952 tp->md5sig_info->keys4 = NULL;
953 tp->md5sig_info->alloced4 = 0;
954 } else if (tp->md5sig_info->entries4 != i) {
955 /* Need to do some manipulation */
956 memmove(&tp->md5sig_info->keys4[i],
957 &tp->md5sig_info->keys4[i+1],
958 (tp->md5sig_info->entries4 - i) *
959 sizeof(struct tcp4_md5sig_key));
961 tcp_free_md5sig_pool();
967 EXPORT_SYMBOL(tcp_v4_md5_do_del);
969 static void tcp_v4_clear_md5_list(struct sock *sk)
971 struct tcp_sock *tp = tcp_sk(sk);
973 /* Free each key, then the set of keys,
974 * the crypto element, and then decrement our
975 * hold on the last-resort crypto.
977 if (tp->md5sig_info->entries4) {
979 for (i = 0; i < tp->md5sig_info->entries4; i++)
980 kfree(tp->md5sig_info->keys4[i].base.key);
981 tp->md5sig_info->entries4 = 0;
982 tcp_free_md5sig_pool();
984 if (tp->md5sig_info->keys4) {
985 kfree(tp->md5sig_info->keys4);
986 tp->md5sig_info->keys4 = NULL;
987 tp->md5sig_info->alloced4 = 0;
991 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
994 struct tcp_md5sig cmd;
995 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
998 if (optlen < sizeof(cmd))
1001 if (copy_from_user(&cmd, optval, sizeof(cmd)))
1004 if (sin->sin_family != AF_INET)
1007 if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
1008 if (!tcp_sk(sk)->md5sig_info)
1010 return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
1013 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1016 if (!tcp_sk(sk)->md5sig_info) {
1017 struct tcp_sock *tp = tcp_sk(sk);
1018 struct tcp_md5sig_info *p;
1020 p = kzalloc(sizeof(*p), sk->sk_allocation);
1024 tp->md5sig_info = p;
1025 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1028 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
1031 return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
1032 newkey, cmd.tcpm_keylen);
1035 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1036 __be32 daddr, __be32 saddr, int nbytes)
1038 struct tcp4_pseudohdr *bp;
1039 struct scatterlist sg;
1041 bp = &hp->md5_blk.ip4;
1044 * 1. the TCP pseudo-header (in the order: source IP address,
1045 * destination IP address, zero-padded protocol number, and segment length)
1051 bp->protocol = IPPROTO_TCP;
1052 bp->len = cpu_to_be16(nbytes);
1054 sg_init_one(&sg, bp, sizeof(*bp));
1055 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1058 static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
1059 __be32 daddr, __be32 saddr, struct tcphdr *th)
1061 struct tcp_md5sig_pool *hp;
1062 struct hash_desc *desc;
1064 hp = tcp_get_md5sig_pool();
1066 goto clear_hash_noput;
1067 desc = &hp->md5_desc;
1069 if (crypto_hash_init(desc))
1071 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1073 if (tcp_md5_hash_header(hp, th))
1075 if (tcp_md5_hash_key(hp, key))
1077 if (crypto_hash_final(desc, md5_hash))
1080 tcp_put_md5sig_pool();
1084 tcp_put_md5sig_pool();
1086 memset(md5_hash, 0, 16);
1090 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1091 struct sock *sk, struct request_sock *req,
1092 struct sk_buff *skb)
1094 struct tcp_md5sig_pool *hp;
1095 struct hash_desc *desc;
1096 struct tcphdr *th = tcp_hdr(skb);
1097 __be32 saddr, daddr;
1100 saddr = inet_sk(sk)->inet_saddr;
1101 daddr = inet_sk(sk)->inet_daddr;
1103 saddr = inet_rsk(req)->loc_addr;
1104 daddr = inet_rsk(req)->rmt_addr;
1106 const struct iphdr *iph = ip_hdr(skb);
1111 hp = tcp_get_md5sig_pool();
1113 goto clear_hash_noput;
1114 desc = &hp->md5_desc;
1116 if (crypto_hash_init(desc))
1119 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1121 if (tcp_md5_hash_header(hp, th))
1123 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1125 if (tcp_md5_hash_key(hp, key))
1127 if (crypto_hash_final(desc, md5_hash))
1130 tcp_put_md5sig_pool();
1134 tcp_put_md5sig_pool();
1136 memset(md5_hash, 0, 16);
1139 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1141 static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
1144 * This gets called for each TCP segment that arrives
1145 * so we want to be efficient.
1146 * We have 3 drop cases:
1147 * o No MD5 hash and one expected.
1148 * o MD5 hash and we're not expecting one.
1149 * o MD5 hash and it's wrong.
1151 __u8 *hash_location = NULL;
1152 struct tcp_md5sig_key *hash_expected;
1153 const struct iphdr *iph = ip_hdr(skb);
1154 struct tcphdr *th = tcp_hdr(skb);
1156 unsigned char newhash[16];
1158 hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
1159 hash_location = tcp_parse_md5sig_option(th);
1161 /* We've parsed the options - do we have a hash? */
1162 if (!hash_expected && !hash_location)
1165 if (hash_expected && !hash_location) {
1166 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1170 if (!hash_expected && hash_location) {
1171 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1175 /* Okay, so this is hash_expected and hash_location -
1176 * so we need to calculate the checksum.
1178 genhash = tcp_v4_md5_hash_skb(newhash,
1182 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1183 if (net_ratelimit()) {
1184 printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1185 &iph->saddr, ntohs(th->source),
1186 &iph->daddr, ntohs(th->dest),
1187 genhash ? " tcp_v4_calc_md5_hash failed" : "");
1196 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1198 .obj_size = sizeof(struct tcp_request_sock),
1199 .rtx_syn_ack = tcp_v4_rtx_synack,
1200 .send_ack = tcp_v4_reqsk_send_ack,
1201 .destructor = tcp_v4_reqsk_destructor,
1202 .send_reset = tcp_v4_send_reset,
1203 .syn_ack_timeout = tcp_syn_ack_timeout,
1206 #ifdef CONFIG_TCP_MD5SIG
1207 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1208 .md5_lookup = tcp_v4_reqsk_md5_lookup,
1209 .calc_md5_hash = tcp_v4_md5_hash_skb,
1213 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1214 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1215 .twsk_unique = tcp_twsk_unique,
1216 .twsk_destructor= tcp_twsk_destructor,
1219 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1221 struct tcp_extend_values tmp_ext;
1222 struct tcp_options_received tmp_opt;
1224 struct request_sock *req;
1225 struct inet_request_sock *ireq;
1226 struct tcp_sock *tp = tcp_sk(sk);
1227 struct dst_entry *dst = NULL;
1228 __be32 saddr = ip_hdr(skb)->saddr;
1229 __be32 daddr = ip_hdr(skb)->daddr;
1230 __u32 isn = TCP_SKB_CB(skb)->when;
1231 #ifdef CONFIG_SYN_COOKIES
1232 int want_cookie = 0;
1234 #define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
1237 /* Never answer SYNs sent to broadcast or multicast */
1238 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1241 /* TW buckets are converted to open requests without
1242 * limitation; they conserve resources and the peer is
1243 * evidently a real one.
1245 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1246 if (net_ratelimit())
1247 syn_flood_warning(skb);
1248 #ifdef CONFIG_SYN_COOKIES
1249 if (sysctl_tcp_syncookies) {
1256 /* Accept backlog is full. If we have already queued enough
1257 * warm entries in the syn queue, drop this request. It is better than
1258 * clogging the syn queue with openreqs with exponentially increasing timeout.
1261 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1264 req = inet_reqsk_alloc(&tcp_request_sock_ops);
1268 #ifdef CONFIG_TCP_MD5SIG
1269 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1272 tcp_clear_options(&tmp_opt);
1273 tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1274 tmp_opt.user_mss = tp->rx_opt.user_mss;
1275 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1277 if (tmp_opt.cookie_plus > 0 &&
1278 tmp_opt.saw_tstamp &&
1279 !tp->rx_opt.cookie_out_never &&
1280 (sysctl_tcp_cookie_size > 0 ||
1281 (tp->cookie_values != NULL &&
1282 tp->cookie_values->cookie_desired > 0))) {
1284 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1285 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1287 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1288 goto drop_and_release;
1290 /* Secret recipe starts with IP addresses */
1291 *mess++ ^= (__force u32)daddr;
1292 *mess++ ^= (__force u32)saddr;
1294 /* plus variable length Initiator Cookie */
1297 *c++ ^= *hash_location++;
1299 #ifdef CONFIG_SYN_COOKIES
1300 want_cookie = 0; /* not our kind of cookie */
1302 tmp_ext.cookie_out_never = 0; /* false */
1303 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1304 } else if (!tp->rx_opt.cookie_in_always) {
1305 /* redundant indications, but ensure initialization. */
1306 tmp_ext.cookie_out_never = 1; /* true */
1307 tmp_ext.cookie_plus = 0;
1309 goto drop_and_release;
1311 tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1313 if (want_cookie && !tmp_opt.saw_tstamp)
1314 tcp_clear_options(&tmp_opt);
1316 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1317 tcp_openreq_init(req, &tmp_opt, skb);
1319 ireq = inet_rsk(req);
1320 ireq->loc_addr = daddr;
1321 ireq->rmt_addr = saddr;
1322 ireq->no_srccheck = inet_sk(sk)->transparent;
1323 ireq->opt = tcp_v4_save_options(sk, skb);
1325 if (security_inet_conn_request(sk, skb, req))
1328 if (!want_cookie || tmp_opt.tstamp_ok)
1329 TCP_ECN_create_request(req, tcp_hdr(skb));
1332 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1333 req->cookie_ts = tmp_opt.tstamp_ok;
1335 struct inet_peer *peer = NULL;
1337 /* VJ's idea. We save last timestamp seen
1338 * from the destination in peer table, when entering
1339 * state TIME-WAIT, and check against it before
1340 * accepting new connection request.
1342 * If "isn" is not zero, this request hit an alive
1343 * timewait bucket, so all the necessary checks
1344 * are made in the function processing the timewait state.
1346 if (tmp_opt.saw_tstamp &&
1347 tcp_death_row.sysctl_tw_recycle &&
1348 (dst = inet_csk_route_req(sk, req)) != NULL &&
1349 (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1350 peer->v4daddr == saddr) {
1351 inet_peer_refcheck(peer);
1352 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1353 (s32)(peer->tcp_ts - req->ts_recent) >
1355 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1356 goto drop_and_release;
1359 /* Kill the following clause, if you dislike this way. */
1360 else if (!sysctl_tcp_syncookies &&
1361 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1362 (sysctl_max_syn_backlog >> 2)) &&
1363 (!peer || !peer->tcp_ts_stamp) &&
1364 (!dst || !dst_metric(dst, RTAX_RTT))) {
1365 /* Without syncookies the last quarter of the
1366 * backlog is filled with destinations
1367 * proven to be alive.
1368 * It means that we continue to communicate
1369 * only with destinations already remembered
1370 * at the moment of the synflood.
1372 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
1373 &saddr, ntohs(tcp_hdr(skb)->source));
1374 goto drop_and_release;
1377 isn = tcp_v4_init_sequence(skb);
1379 tcp_rsk(req)->snt_isn = isn;
1381 if (tcp_v4_send_synack(sk, dst, req,
1382 (struct request_values *)&tmp_ext) ||
1386 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1396 EXPORT_SYMBOL(tcp_v4_conn_request);
1400 * The three-way handshake has completed - we got a valid synack -
1401 * now create the new socket.
1403 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1404 struct request_sock *req,
1405 struct dst_entry *dst)
1407 struct inet_request_sock *ireq;
1408 struct inet_sock *newinet;
1409 struct tcp_sock *newtp;
1411 #ifdef CONFIG_TCP_MD5SIG
1412 struct tcp_md5sig_key *key;
1415 if (sk_acceptq_is_full(sk))
1418 if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
1421 newsk = tcp_create_openreq_child(sk, req, skb);
1425 newsk->sk_gso_type = SKB_GSO_TCPV4;
1426 sk_setup_caps(newsk, dst);
1428 newtp = tcp_sk(newsk);
1429 newinet = inet_sk(newsk);
1430 ireq = inet_rsk(req);
1431 newinet->inet_daddr = ireq->rmt_addr;
1432 newinet->inet_rcv_saddr = ireq->loc_addr;
1433 newinet->inet_saddr = ireq->loc_addr;
1434 newinet->opt = ireq->opt;
1436 newinet->mc_index = inet_iif(skb);
1437 newinet->mc_ttl = ip_hdr(skb)->ttl;
1438 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1440 inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
1441 newinet->inet_id = newtp->write_seq ^ jiffies;
1443 tcp_mtup_init(newsk);
1444 tcp_sync_mss(newsk, dst_mtu(dst));
1445 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1446 if (tcp_sk(sk)->rx_opt.user_mss &&
1447 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1448 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1450 tcp_initialize_rcv_mss(newsk);
1452 #ifdef CONFIG_TCP_MD5SIG
1453 /* Copy over the MD5 key from the original socket */
1454 key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr);
1457 * We're using one, so create a matching key
1458 * on the newsk structure. If we fail to get
1459 * memory, then we end up not copying the key across. Shucks.
1462 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1464 tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
1465 newkey, key->keylen);
1466 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1470 __inet_hash_nolisten(newsk, NULL);
1471 __inet_inherit_port(sk, newsk);
1476 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1478 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1482 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
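/* For a segment arriving on a listening socket: look up a pending open
 * request or an already-established child matching the segment, falling
 * back to a syncookie check when neither is found.
 */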
1484 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1486 struct tcphdr *th = tcp_hdr(skb);
1487 const struct iphdr *iph = ip_hdr(skb);
1489 struct request_sock **prev;
1490 /* Find possible connection requests. */
1491 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1492 iph->saddr, iph->daddr);
1494 return tcp_check_req(sk, skb, req, prev);
1496 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1497 th->source, iph->daddr, th->dest, inet_iif(skb));
1500 if (nsk->sk_state != TCP_TIME_WAIT) {
1504 inet_twsk_put(inet_twsk(nsk));
1508 #ifdef CONFIG_SYN_COOKIES
1510 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
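/* Validate the TCP checksum of an incoming segment.  If the device
 * delivered a CHECKSUM_COMPLETE sum, only the pseudo-header needs to be
 * folded in here; otherwise set up a pseudo-header sum and fully verify
 * short packets (<= 76 bytes) right away in software.
 */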
1515 static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1517 const struct iphdr *iph = ip_hdr(skb);
1519 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1520 if (!tcp_v4_check(skb->len, iph->saddr,
1521 iph->daddr, skb->csum)) {
1522 skb->ip_summed = CHECKSUM_UNNECESSARY;
1527 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1528 skb->len, IPPROTO_TCP, 0);
1530 if (skb->len <= 76) {
1531 return __skb_checksum_complete(skb);
1537 /* The socket must have its spinlock held when we get here.
1540 * We have a potential double-lock case here, so even when
1541 * doing backlog processing we use the BH locking scheme.
1542 * This is because we cannot sleep with the original spinlock
1545 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1548 #ifdef CONFIG_TCP_MD5SIG
1550 * We really want to reject the packet as early as possible when:
1552 * o We're expecting an MD5'd packet and there is no MD5 tcp option
1553 * o There is an MD5 option and we're not expecting one
1555 if (tcp_v4_inbound_md5_hash(sk, skb))
1559 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1560 sock_rps_save_rxhash(sk, skb->rxhash);
1561 TCP_CHECK_TIMER(sk);
1562 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1566 TCP_CHECK_TIMER(sk);
1570 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1573 if (sk->sk_state == TCP_LISTEN) {
1574 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1579 if (tcp_child_process(sk, nsk, skb)) {
1586 sock_rps_save_rxhash(sk, skb->rxhash);
1589 TCP_CHECK_TIMER(sk);
1590 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1594 TCP_CHECK_TIMER(sk);
1598 tcp_v4_send_reset(rsk, skb);
1601 /* Be careful here. If this function gets more complicated and
1602 * gcc suffers from register pressure on the x86, sk (in %ebx)
1603 * might be destroyed here. This current version compiles correctly,
1604 * but you have been warned.
1609 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1612 EXPORT_SYMBOL(tcp_v4_do_rcv);
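/* Main receive entry point, called for every TCP segment handed up by
 * IP.  Validates the header and checksum, looks up the owning socket and
 * then either processes the segment in softirq context, prequeues it, or
 * appends it to the socket backlog.
 */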
1618 int tcp_v4_rcv(struct sk_buff *skb)
1620 const struct iphdr *iph;
1624 struct net *net = dev_net(skb->dev);
1626 if (skb->pkt_type != PACKET_HOST)
1629 /* Count it even if it's bad */
1630 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1632 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1637 if (th->doff < sizeof(struct tcphdr) / 4)
1639 if (!pskb_may_pull(skb, th->doff * 4))
1642 /* An explanation is required here, I think.
1643 * Packet length and doff are validated by header prediction,
1644 * provided the case of th->doff == 0 is eliminated.
1645 * So, we defer the checks. */
1646 if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1651 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1652 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1653 skb->len - th->doff * 4);
1654 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1655 TCP_SKB_CB(skb)->when = 0;
1656 TCP_SKB_CB(skb)->flags = iph->tos;
1657 TCP_SKB_CB(skb)->sacked = 0;
1659 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1664 if (sk->sk_state == TCP_TIME_WAIT)
1667 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1668 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1669 goto discard_and_relse;
1672 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1673 goto discard_and_relse;
1676 if (sk_filter(sk, skb))
1677 goto discard_and_relse;
1681 bh_lock_sock_nested(sk);
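/* Three delivery paths from here: if no user-context task owns the
 * socket, process the segment now in softirq context (possibly via the
 * prequeue when a reader is waiting); otherwise append it to the
 * backlog, which the owner drains when it releases the socket lock.
 */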
1683 if (!sock_owned_by_user(sk)) {
1684 #ifdef CONFIG_NET_DMA
1685 struct tcp_sock *tp = tcp_sk(sk);
1686 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1687 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1688 if (tp->ucopy.dma_chan)
1689 ret = tcp_v4_do_rcv(sk, skb);
1693 if (!tcp_prequeue(sk, skb))
1694 ret = tcp_v4_do_rcv(sk, skb);
1696 } else if (unlikely(sk_add_backlog(sk, skb))) {
1698 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1699 goto discard_and_relse;
1708 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1711 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1713 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1715 tcp_v4_send_reset(NULL, skb);
1719 /* Discard frame. */
1728 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1729 inet_twsk_put(inet_twsk(sk));
1733 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1734 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1735 inet_twsk_put(inet_twsk(sk));
1738 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1740 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1742 iph->daddr, th->dest,
1745 inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1746 inet_twsk_put(inet_twsk(sk));
1750 /* Fall through to ACK */
1753 tcp_v4_timewait_ack(sk, skb);
1757 case TCP_TW_SUCCESS:;
1762 /* VJ's idea. Save the last timestamp seen from this destination
1763 * and hold it at least for the normal timewait interval, to use for duplicate
1764 * segment detection in subsequent connections before they enter synchronized state.
1768 int tcp_v4_remember_stamp(struct sock *sk)
1770 struct inet_sock *inet = inet_sk(sk);
1771 struct tcp_sock *tp = tcp_sk(sk);
1772 struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
1773 struct inet_peer *peer = NULL;
1776 if (!rt || rt->rt_dst != inet->inet_daddr) {
1777 peer = inet_getpeer(inet->inet_daddr, 1);
1781 rt_bind_peer(rt, 1);
1786 if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
1787 ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
1788 peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
1789 peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
1790 peer->tcp_ts = tp->rx_opt.ts_recent;
1799 EXPORT_SYMBOL(tcp_v4_remember_stamp);
1801 int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
1803 struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);
1806 const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
1808 if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
1809 ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
1810 peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
1811 peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
1812 peer->tcp_ts = tcptw->tw_ts_recent;
1821 const struct inet_connection_sock_af_ops ipv4_specific = {
1822 .queue_xmit = ip_queue_xmit,
1823 .send_check = tcp_v4_send_check,
1824 .rebuild_header = inet_sk_rebuild_header,
1825 .conn_request = tcp_v4_conn_request,
1826 .syn_recv_sock = tcp_v4_syn_recv_sock,
1827 .remember_stamp = tcp_v4_remember_stamp,
1828 .net_header_len = sizeof(struct iphdr),
1829 .setsockopt = ip_setsockopt,
1830 .getsockopt = ip_getsockopt,
1831 .addr2sockaddr = inet_csk_addr2sockaddr,
1832 .sockaddr_len = sizeof(struct sockaddr_in),
1833 .bind_conflict = inet_csk_bind_conflict,
1834 #ifdef CONFIG_COMPAT
1835 .compat_setsockopt = compat_ip_setsockopt,
1836 .compat_getsockopt = compat_ip_getsockopt,
1839 EXPORT_SYMBOL(ipv4_specific);
1841 #ifdef CONFIG_TCP_MD5SIG
1842 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1843 .md5_lookup = tcp_v4_md5_lookup,
1844 .calc_md5_hash = tcp_v4_md5_hash_skb,
1845 .md5_add = tcp_v4_md5_add_func,
1846 .md5_parse = tcp_v4_parse_md5_keys,
1850 /* NOTE: A lot of things are set to zero explicitly by the call to
1851 * sk_alloc(), so they need not be done here.
1853 static int tcp_v4_init_sock(struct sock *sk)
1855 struct inet_connection_sock *icsk = inet_csk(sk);
1856 struct tcp_sock *tp = tcp_sk(sk);
1858 skb_queue_head_init(&tp->out_of_order_queue);
1859 tcp_init_xmit_timers(sk);
1860 tcp_prequeue_init(tp);
1862 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1863 tp->mdev = TCP_TIMEOUT_INIT;
1865 /* So many TCP implementations out there (incorrectly) count the
1866 * initial SYN frame in their delayed-ACK and congestion control
1867 * algorithms that we must have the following bandaid to talk
1868 * efficiently to them. -DaveM
1872 /* See draft-stevens-tcpca-spec-01 for discussion of the
1873 * initialization of these values.
1875 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1876 tp->snd_cwnd_clamp = ~0;
1877 tp->mss_cache = TCP_MSS_DEFAULT;
1879 tp->reordering = sysctl_tcp_reordering;
1880 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1882 sk->sk_state = TCP_CLOSE;
1884 sk->sk_write_space = sk_stream_write_space;
1885 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1887 icsk->icsk_af_ops = &ipv4_specific;
1888 icsk->icsk_sync_mss = tcp_sync_mss;
1889 #ifdef CONFIG_TCP_MD5SIG
1890 tp->af_specific = &tcp_sock_ipv4_specific;
1893 /* TCP Cookie Transactions */
1894 if (sysctl_tcp_cookie_size > 0) {
1895 /* Default, cookies without s_data_payload. */
1897 kzalloc(sizeof(*tp->cookie_values),
1899 if (tp->cookie_values != NULL)
1900 kref_init(&tp->cookie_values->kref);
1902 /* Presumed zeroed, in order of appearance:
1903 * cookie_in_always, cookie_out_never,
1904 * s_data_constant, s_data_in, s_data_out
1906 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1907 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1910 percpu_counter_inc(&tcp_sockets_allocated);
1916 void tcp_v4_destroy_sock(struct sock *sk)
1918 struct tcp_sock *tp = tcp_sk(sk);
1920 tcp_clear_xmit_timers(sk);
1922 tcp_cleanup_congestion_control(sk);
1924 /* Clean up the write buffer. */
1925 tcp_write_queue_purge(sk);
1927 /* Cleans up our, hopefully empty, out_of_order_queue. */
1928 __skb_queue_purge(&tp->out_of_order_queue);
1930 #ifdef CONFIG_TCP_MD5SIG
1931 /* Clean up the MD5 key list, if any */
1932 if (tp->md5sig_info) {
1933 tcp_v4_clear_md5_list(sk);
1934 kfree(tp->md5sig_info);
1935 tp->md5sig_info = NULL;
1939 #ifdef CONFIG_NET_DMA
1940 /* Cleans up our sk_async_wait_queue */
1941 __skb_queue_purge(&sk->sk_async_wait_queue);
1944 /* Clean the prequeue; it really must be empty. */
1945 __skb_queue_purge(&tp->ucopy.prequeue);
1947 /* Clean up a referenced TCP bind bucket. */
1948 if (inet_csk(sk)->icsk_bind_hash)
1952 * If sendmsg cached page exists, toss it.
1954 if (sk->sk_sndmsg_page) {
1955 __free_page(sk->sk_sndmsg_page);
1956 sk->sk_sndmsg_page = NULL;
1959 /* TCP Cookie Transactions */
1960 if (tp->cookie_values != NULL) {
1961 kref_put(&tp->cookie_values->kref,
1962 tcp_cookie_values_release);
1963 tp->cookie_values = NULL;
1966 percpu_counter_dec(&tcp_sockets_allocated);
1968 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1970 #ifdef CONFIG_PROC_FS
1971 /* Proc filesystem TCP sock list dumping. */
1973 static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
1975 return hlist_nulls_empty(head) ? NULL :
1976 list_entry(head->first, struct inet_timewait_sock, tw_node);
1979 static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1981 return !is_a_nulls(tw->tw_node.next) ?
1982 hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
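/* The /proc iterator below walks the hash tables as a small state
 * machine: TCP_SEQ_STATE_LISTENING and TCP_SEQ_STATE_OPENREQ cover the
 * listening hash (including each listener's SYN table), then
 * TCP_SEQ_STATE_ESTABLISHED and TCP_SEQ_STATE_TIME_WAIT cover the
 * established hash and its twchain.
 */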
1986 * Get the next listener socket following cur. If cur is NULL, get the first socket
1987 * starting from the bucket given in st->bucket; when st->bucket is zero the
1988 * very first socket in the hash table is returned.
1990 static void *listening_get_next(struct seq_file *seq, void *cur)
1992 struct inet_connection_sock *icsk;
1993 struct hlist_nulls_node *node;
1994 struct sock *sk = cur;
1995 struct inet_listen_hashbucket *ilb;
1996 struct tcp_iter_state *st = seq->private;
1997 struct net *net = seq_file_net(seq);
2000 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2001 spin_lock_bh(&ilb->lock);
2002 sk = sk_nulls_head(&ilb->head);
2006 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2010 if (st->state == TCP_SEQ_STATE_OPENREQ) {
2011 struct request_sock *req = cur;
2013 icsk = inet_csk(st->syn_wait_sk);
2017 if (req->rsk_ops->family == st->family) {
2024 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
2027 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2029 sk = sk_next(st->syn_wait_sk);
2030 st->state = TCP_SEQ_STATE_LISTENING;
2031 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2033 icsk = inet_csk(sk);
2034 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2035 if (reqsk_queue_len(&icsk->icsk_accept_queue))
2037 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2041 sk_nulls_for_each_from(sk, node) {
2042 if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
2046 icsk = inet_csk(sk);
2047 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2048 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2050 st->uid = sock_i_uid(sk);
2051 st->syn_wait_sk = sk;
2052 st->state = TCP_SEQ_STATE_OPENREQ;
2056 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2058 spin_unlock_bh(&ilb->lock);
2060 if (++st->bucket < INET_LHTABLE_SIZE) {
2061 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2062 spin_lock_bh(&ilb->lock);
2063 sk = sk_nulls_head(&ilb->head);
2071 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2073 struct tcp_iter_state *st = seq->private;
2078 rc = listening_get_next(seq, NULL);
2080 while (rc && *pos) {
2081 rc = listening_get_next(seq, rc);
2087 static inline int empty_bucket(struct tcp_iter_state *st)
2089 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
2090 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
2094 * Get the first established socket, starting from the bucket given in st->bucket.
2095 * If st->bucket is zero, the very first socket in the hash is returned.
2097 static void *established_get_first(struct seq_file *seq)
2099 struct tcp_iter_state *st = seq->private;
2100 struct net *net = seq_file_net(seq);
2104 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2106 struct hlist_nulls_node *node;
2107 struct inet_timewait_sock *tw;
2108 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2110 /* Lockless fast path for the common case of empty buckets */
2111 if (empty_bucket(st))
2115 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2116 if (sk->sk_family != st->family ||
2117 !net_eq(sock_net(sk), net)) {
2123 st->state = TCP_SEQ_STATE_TIME_WAIT;
2124 inet_twsk_for_each(tw, node,
2125 &tcp_hashinfo.ehash[st->bucket].twchain) {
2126 if (tw->tw_family != st->family ||
2127 !net_eq(twsk_net(tw), net)) {
2133 spin_unlock_bh(lock);
2134 st->state = TCP_SEQ_STATE_ESTABLISHED;
2140 static void *established_get_next(struct seq_file *seq, void *cur)
2142 struct sock *sk = cur;
2143 struct inet_timewait_sock *tw;
2144 struct hlist_nulls_node *node;
2145 struct tcp_iter_state *st = seq->private;
2146 struct net *net = seq_file_net(seq);
2151 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2155 while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
2162 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2163 st->state = TCP_SEQ_STATE_ESTABLISHED;
2165 /* Look for the next non-empty bucket */
2167 while (++st->bucket <= tcp_hashinfo.ehash_mask &&
2170 if (st->bucket > tcp_hashinfo.ehash_mask)
2173 spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2174 sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
2176 sk = sk_nulls_next(sk);
2178 sk_nulls_for_each_from(sk, node) {
2179 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2183 st->state = TCP_SEQ_STATE_TIME_WAIT;
2184 tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
2192 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2194 struct tcp_iter_state *st = seq->private;
2198 rc = established_get_first(seq);
2201 rc = established_get_next(seq, rc);
2207 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2210 struct tcp_iter_state *st = seq->private;
2212 st->state = TCP_SEQ_STATE_LISTENING;
2213 rc = listening_get_idx(seq, &pos);
2216 st->state = TCP_SEQ_STATE_ESTABLISHED;
2217 rc = established_get_idx(seq, pos);
2223 static void *tcp_seek_last_pos(struct seq_file *seq)
2225 struct tcp_iter_state *st = seq->private;
2226 int offset = st->offset;
2227 int orig_num = st->num;
2230 switch (st->state) {
2231 case TCP_SEQ_STATE_OPENREQ:
2232 case TCP_SEQ_STATE_LISTENING:
2233 if (st->bucket >= INET_LHTABLE_SIZE)
2235 st->state = TCP_SEQ_STATE_LISTENING;
2236 rc = listening_get_next(seq, NULL);
2237 while (offset-- && rc)
2238 rc = listening_get_next(seq, rc);
2243 case TCP_SEQ_STATE_ESTABLISHED:
2244 case TCP_SEQ_STATE_TIME_WAIT:
2245 st->state = TCP_SEQ_STATE_ESTABLISHED;
2246 if (st->bucket > tcp_hashinfo.ehash_mask)
2248 rc = established_get_first(seq);
2249 while (offset-- && rc)
2250 rc = established_get_next(seq, rc);
2258 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2260 struct tcp_iter_state *st = seq->private;
2263 if (*pos && *pos == st->last_pos) {
2264 rc = tcp_seek_last_pos(seq);
2269 st->state = TCP_SEQ_STATE_LISTENING;
2273 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2276 st->last_pos = *pos;
2280 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2282 struct tcp_iter_state *st = seq->private;
2285 if (v == SEQ_START_TOKEN) {
2286 rc = tcp_get_idx(seq, 0);
2290 switch (st->state) {
2291 case TCP_SEQ_STATE_OPENREQ:
2292 case TCP_SEQ_STATE_LISTENING:
2293 rc = listening_get_next(seq, v);
2295 st->state = TCP_SEQ_STATE_ESTABLISHED;
2298 rc = established_get_first(seq);
2301 case TCP_SEQ_STATE_ESTABLISHED:
2302 case TCP_SEQ_STATE_TIME_WAIT:
2303 rc = established_get_next(seq, v);
2308 st->last_pos = *pos;
2312 static void tcp_seq_stop(struct seq_file *seq, void *v)
2314 struct tcp_iter_state *st = seq->private;
2316 switch (st->state) {
2317 case TCP_SEQ_STATE_OPENREQ:
2319 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2320 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2322 case TCP_SEQ_STATE_LISTENING:
2323 if (v != SEQ_START_TOKEN)
2324 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2326 case TCP_SEQ_STATE_TIME_WAIT:
2327 case TCP_SEQ_STATE_ESTABLISHED:
2329 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2334 static int tcp_seq_open(struct inode *inode, struct file *file)
2336 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2337 struct tcp_iter_state *s;
2340 err = seq_open_net(inode, file, &afinfo->seq_ops,
2341 sizeof(struct tcp_iter_state));
2345 s = ((struct seq_file *)file->private_data)->private;
2346 s->family = afinfo->family;
2351 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2354 struct proc_dir_entry *p;
2356 afinfo->seq_fops.open = tcp_seq_open;
2357 afinfo->seq_fops.read = seq_read;
2358 afinfo->seq_fops.llseek = seq_lseek;
2359 afinfo->seq_fops.release = seq_release_net;
2361 afinfo->seq_ops.start = tcp_seq_start;
2362 afinfo->seq_ops.next = tcp_seq_next;
2363 afinfo->seq_ops.stop = tcp_seq_stop;
2365 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2366 &afinfo->seq_fops, afinfo);
2371 EXPORT_SYMBOL(tcp_proc_register);
2373 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2375 proc_net_remove(net, afinfo->name);
2377 EXPORT_SYMBOL(tcp_proc_unregister);
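/* The three helpers below each format one row of /proc/net/tcp: an open
 * request, a full socket, and a timewait socket respectively.  The
 * columns match the header printed in tcp4_seq_show(): sl, local_address,
 * rem_address, st, tx_queue:rx_queue, tr:tm->when, retrnsmt, uid,
 * timeout, inode, plus a few unlabelled fields.
 */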
2379 static void get_openreq4(struct sock *sk, struct request_sock *req,
2380 struct seq_file *f, int i, int uid, int *len)
2382 const struct inet_request_sock *ireq = inet_rsk(req);
2383 int ttd = req->expires - jiffies;
2385 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2386 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
2389 ntohs(inet_sk(sk)->inet_sport),
2391 ntohs(ireq->rmt_port),
2393 0, 0, /* could print option size, but that is af dependent. */
2394 1, /* timers active (only the expire timer) */
2395 jiffies_to_clock_t(ttd),
2398 0, /* non standard timer */
2399 0, /* open_requests have no inode */
2400 atomic_read(&sk->sk_refcnt),
2405 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2408 unsigned long timer_expires;
2409 struct tcp_sock *tp = tcp_sk(sk);
2410 const struct inet_connection_sock *icsk = inet_csk(sk);
2411 struct inet_sock *inet = inet_sk(sk);
2412 __be32 dest = inet->inet_daddr;
2413 __be32 src = inet->inet_rcv_saddr;
2414 __u16 destp = ntohs(inet->inet_dport);
2415 __u16 srcp = ntohs(inet->inet_sport);
2418 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2420 timer_expires = icsk->icsk_timeout;
2421 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2423 timer_expires = icsk->icsk_timeout;
2424 } else if (timer_pending(&sk->sk_timer)) {
2426 timer_expires = sk->sk_timer.expires;
2429 timer_expires = jiffies;
2432 if (sk->sk_state == TCP_LISTEN)
2433 rx_queue = sk->sk_ack_backlog;
2436 * because we don't lock the socket, we might find a transient negative value
2438 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2440 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2441 "%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
2442 i, src, srcp, dest, destp, sk->sk_state,
2443 tp->write_seq - tp->snd_una,
2446 jiffies_to_clock_t(timer_expires - jiffies),
2447 icsk->icsk_retransmits,
2449 icsk->icsk_probes_out,
2451 atomic_read(&sk->sk_refcnt), sk,
2452 jiffies_to_clock_t(icsk->icsk_rto),
2453 jiffies_to_clock_t(icsk->icsk_ack.ato),
2454 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2456 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
2460 static void get_timewait4_sock(struct inet_timewait_sock *tw,
2461 struct seq_file *f, int i, int *len)
2465 int ttd = tw->tw_ttd - jiffies;
2470 dest = tw->tw_daddr;
2471 src = tw->tw_rcv_saddr;
2472 destp = ntohs(tw->tw_dport);
2473 srcp = ntohs(tw->tw_sport);
2475 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2476 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
2477 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2478 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2479 atomic_read(&tw->tw_refcnt), tw, len);
2484 static int tcp4_seq_show(struct seq_file *seq, void *v)
2486 struct tcp_iter_state *st;
2489 if (v == SEQ_START_TOKEN) {
2490 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2491 " sl local_address rem_address st tx_queue "
2492 "rx_queue tr tm->when retrnsmt uid timeout "
2498 switch (st->state) {
2499 case TCP_SEQ_STATE_LISTENING:
2500 case TCP_SEQ_STATE_ESTABLISHED:
2501 get_tcp4_sock(v, seq, st->num, &len);
2503 case TCP_SEQ_STATE_OPENREQ:
2504 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
2506 case TCP_SEQ_STATE_TIME_WAIT:
2507 get_timewait4_sock(v, seq, st->num, &len);
2510 seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
2515 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2519 .owner = THIS_MODULE,
2522 .show = tcp4_seq_show,
2526 static int __net_init tcp4_proc_init_net(struct net *net)
2528 return tcp_proc_register(net, &tcp4_seq_afinfo);
2531 static void __net_exit tcp4_proc_exit_net(struct net *net)
2533 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2536 static struct pernet_operations tcp4_net_ops = {
2537 .init = tcp4_proc_init_net,
2538 .exit = tcp4_proc_exit_net,
2541 int __init tcp4_proc_init(void)
2543 return register_pernet_subsys(&tcp4_net_ops);
2546 void tcp4_proc_exit(void)
2548 unregister_pernet_subsys(&tcp4_net_ops);
2550 #endif /* CONFIG_PROC_FS */
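/* GRO receive: verify the TCP checksum (or set up a partial sum for
 * later verification) before handing the segment to the generic
 * tcp_gro_receive() aggregator; segments that cannot be checked cheaply
 * are flagged for flush.
 */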
2552 struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2554 struct iphdr *iph = skb_gro_network_header(skb);
2556 switch (skb->ip_summed) {
2557 case CHECKSUM_COMPLETE:
2558 if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
2560 skb->ip_summed = CHECKSUM_UNNECESSARY;
2566 NAPI_GRO_CB(skb)->flush = 1;
2570 return tcp_gro_receive(head, skb);
2572 EXPORT_SYMBOL(tcp4_gro_receive);
2574 int tcp4_gro_complete(struct sk_buff *skb)
2576 struct iphdr *iph = ip_hdr(skb);
2577 struct tcphdr *th = tcp_hdr(skb);
2579 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
2580 iph->saddr, iph->daddr, 0);
2581 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2583 return tcp_gro_complete(skb);
2585 EXPORT_SYMBOL(tcp4_gro_complete);
2587 struct proto tcp_prot = {
2589 .owner = THIS_MODULE,
2591 .connect = tcp_v4_connect,
2592 .disconnect = tcp_disconnect,
2593 .accept = inet_csk_accept,
2595 .init = tcp_v4_init_sock,
2596 .destroy = tcp_v4_destroy_sock,
2597 .shutdown = tcp_shutdown,
2598 .setsockopt = tcp_setsockopt,
2599 .getsockopt = tcp_getsockopt,
2600 .recvmsg = tcp_recvmsg,
2601 .sendmsg = tcp_sendmsg,
2602 .sendpage = tcp_sendpage,
2603 .backlog_rcv = tcp_v4_do_rcv,
2605 .unhash = inet_unhash,
2606 .get_port = inet_csk_get_port,
2607 .enter_memory_pressure = tcp_enter_memory_pressure,
2608 .sockets_allocated = &tcp_sockets_allocated,
2609 .orphan_count = &tcp_orphan_count,
2610 .memory_allocated = &tcp_memory_allocated,
2611 .memory_pressure = &tcp_memory_pressure,
2612 .sysctl_mem = sysctl_tcp_mem,
2613 .sysctl_wmem = sysctl_tcp_wmem,
2614 .sysctl_rmem = sysctl_tcp_rmem,
2615 .max_header = MAX_TCP_HEADER,
2616 .obj_size = sizeof(struct tcp_sock),
2617 .slab_flags = SLAB_DESTROY_BY_RCU,
2618 .twsk_prot = &tcp_timewait_sock_ops,
2619 .rsk_prot = &tcp_request_sock_ops,
2620 .h.hashinfo = &tcp_hashinfo,
2621 .no_autobind = true,
2622 #ifdef CONFIG_COMPAT
2623 .compat_setsockopt = compat_tcp_setsockopt,
2624 .compat_getsockopt = compat_tcp_getsockopt,
2627 EXPORT_SYMBOL(tcp_prot);
2630 static int __net_init tcp_sk_init(struct net *net)
2632 return inet_ctl_sock_create(&net->ipv4.tcp_sock,
2633 PF_INET, SOCK_RAW, IPPROTO_TCP, net);
2636 static void __net_exit tcp_sk_exit(struct net *net)
2638 inet_ctl_sock_destroy(net->ipv4.tcp_sock);
2641 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2643 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2646 static struct pernet_operations __net_initdata tcp_sk_ops = {
2647 .init = tcp_sk_init,
2648 .exit = tcp_sk_exit,
2649 .exit_batch = tcp_sk_exit_batch,
2652 void __init tcp_v4_init(void)
2654 inet_hashinfo_init(&tcp_hashinfo);
2655 if (register_pernet_subsys(&tcp_sk_ops))
2656 panic("Failed to create the TCP control socket.\n");