kernel/linux-2.6.36.git: net/ipv4/tcp_ipv4.c
1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              Implementation of the Transmission Control Protocol(TCP).
7  *
8  *              IPv4 specific functions
9  *
10  *
11  *              code split from:
12  *              linux/ipv4/tcp.c
13  *              linux/ipv4/tcp_input.c
14  *              linux/ipv4/tcp_output.c
15  *
16  *              See tcp.c for author information
17  *
18  *      This program is free software; you can redistribute it and/or
19  *      modify it under the terms of the GNU General Public License
20  *      as published by the Free Software Foundation; either version
21  *      2 of the License, or (at your option) any later version.
22  */
23
24 /*
25  * Changes:
26  *              David S. Miller :       New socket lookup architecture.
27  *                                      This code is dedicated to John Dyson.
28  *              David S. Miller :       Change semantics of established hash,
29  *                                      half is devoted to TIME_WAIT sockets
30  *                                      and the rest go in the other half.
31  *              Andi Kleen :            Add support for syncookies and fixed
32  *                                      some bugs: ip options weren't passed to
33  *                                      the TCP layer, missed a check for an
34  *                                      ACK bit.
35  *              Andi Kleen :            Implemented fast path mtu discovery.
36  *                                      Fixed many serious bugs in the
37  *                                      request_sock handling and moved
38  *                                      most of it into the af independent code.
39  *                                      Added tail drop and some other bugfixes.
40  *                                      Added new listen semantics.
41  *              Mike McLagan    :       Routing by source
42  *      Juan Jose Ciarlante:            ip_dynaddr bits
43  *              Andi Kleen:             various fixes.
44  *      Vitaly E. Lavrov        :       Transparent proxy revived after year
45  *                                      coma.
46  *      Andi Kleen              :       Fix new listen.
47  *      Andi Kleen              :       Fix accept error reporting.
48  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
49  *      Alexey Kuznetsov                allows both IPv4 and IPv6 sockets to bind
50  *                                      a single port at the same time.
51  */
52
53
54 #include <linux/bottom_half.h>
55 #include <linux/types.h>
56 #include <linux/fcntl.h>
57 #include <linux/module.h>
58 #include <linux/random.h>
59 #include <linux/cache.h>
60 #include <linux/jhash.h>
61 #include <linux/init.h>
62 #include <linux/times.h>
63 #include <linux/slab.h>
64
65 #include <net/net_namespace.h>
66 #include <net/icmp.h>
67 #include <net/inet_hashtables.h>
68 #include <net/tcp.h>
69 #include <net/transp_v6.h>
70 #include <net/ipv6.h>
71 #include <net/inet_common.h>
72 #include <net/timewait_sock.h>
73 #include <net/xfrm.h>
74 #include <net/netdma.h>
75
76 #include <linux/inet.h>
77 #include <linux/ipv6.h>
78 #include <linux/stddef.h>
79 #include <linux/proc_fs.h>
80 #include <linux/seq_file.h>
81
82 #include <linux/crypto.h>
83 #include <linux/scatterlist.h>
84
85 int sysctl_tcp_tw_reuse __read_mostly;
86 int sysctl_tcp_low_latency __read_mostly;
87 EXPORT_SYMBOL(sysctl_tcp_low_latency);
88
89
90 #ifdef CONFIG_TCP_MD5SIG
91 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
92                                                    __be32 addr);
93 static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
94                                __be32 daddr, __be32 saddr, struct tcphdr *th);
95 #else
96 static inline
97 struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
98 {
99         return NULL;
100 }
101 #endif
102
103 struct inet_hashinfo tcp_hashinfo;
104 EXPORT_SYMBOL(tcp_hashinfo);
105
106 static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
107 {
108         return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
109                                           ip_hdr(skb)->saddr,
110                                           tcp_hdr(skb)->dest,
111                                           tcp_hdr(skb)->source);
112 }
113
114 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
115 {
116         const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
117         struct tcp_sock *tp = tcp_sk(sk);
118
119         /* With PAWS, it is safe from the viewpoint
120            of data integrity. Even without PAWS it is safe provided sequence
121            spaces do not overlap, i.e. at data rates <= 80Mbit/sec.
122 
123            Actually, the idea is close to VJ's, only the timestamp cache is
124            held not per host but per port pair, and the TW bucket is used as
125            the state holder.
126 
127            If the TW bucket has already been destroyed, we fall back to VJ's
128            scheme and use the initial timestamp retrieved from the peer table.
129          */
130         if (tcptw->tw_ts_recent_stamp &&
131             (twp == NULL || (sysctl_tcp_tw_reuse &&
132                              get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
133                 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
134                 if (tp->write_seq == 0)
135                         tp->write_seq = 1;
136                 tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
137                 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
138                 sock_hold(sktw);
139                 return 1;
140         }
141
142         return 0;
143 }
144 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
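/*
 * Illustrative user-space sketch (an aside): tcp_twsk_unique() above only
 * lets a new connection take over a TIME_WAIT port pair when
 * sysctl_tcp_tw_reuse is enabled and timestamps make it safe.  The knob is
 * exposed as /proc/sys/net/ipv4/tcp_tw_reuse; everything else in this
 * snippet is just example scaffolding.
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/net/ipv4/tcp_tw_reuse", "r");
        int val = -1;

        if (f) {
                if (fscanf(f, "%d", &val) != 1)
                        val = -1;
                fclose(f);
        }
        printf("tcp_tw_reuse = %d\n", val);     /* 0 = off, 1 = on */
        return 0;
}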
145
146 /* This will initiate an outgoing connection. */
147 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
148 {
149         struct inet_sock *inet = inet_sk(sk);
150         struct tcp_sock *tp = tcp_sk(sk);
151         struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
152         struct rtable *rt;
153         __be32 daddr, nexthop;
154         int tmp;
155         int err;
156
157         if (addr_len < sizeof(struct sockaddr_in))
158                 return -EINVAL;
159
160         if (usin->sin_family != AF_INET)
161                 return -EAFNOSUPPORT;
162
163         nexthop = daddr = usin->sin_addr.s_addr;
164         if (inet->opt && inet->opt->srr) {
165                 if (!daddr)
166                         return -EINVAL;
167                 nexthop = inet->opt->faddr;
168         }
169
170         tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr,
171                                RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
172                                IPPROTO_TCP,
173                                inet->inet_sport, usin->sin_port, sk, 1);
174         if (tmp < 0) {
175                 if (tmp == -ENETUNREACH)
176                         IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
177                 return tmp;
178         }
179
180         if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
181                 ip_rt_put(rt);
182                 return -ENETUNREACH;
183         }
184
185         if (!inet->opt || !inet->opt->srr)
186                 daddr = rt->rt_dst;
187
188         if (!inet->inet_saddr)
189                 inet->inet_saddr = rt->rt_src;
190         inet->inet_rcv_saddr = inet->inet_saddr;
191
192         if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
193                 /* Reset inherited state */
194                 tp->rx_opt.ts_recent       = 0;
195                 tp->rx_opt.ts_recent_stamp = 0;
196                 tp->write_seq              = 0;
197         }
198
199         if (tcp_death_row.sysctl_tw_recycle &&
200             !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
201                 struct inet_peer *peer = rt_get_peer(rt);
202                 /*
203                  * VJ's idea. We save the last timestamp seen from
204                  * the destination in the peer table when entering
205                  * TIME-WAIT state, and initialize rx_opt.ts_recent from it
206                  * when trying a new connection.
207                  */
208                 if (peer) {
209                         inet_peer_refcheck(peer);
210                         if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
211                                 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
212                                 tp->rx_opt.ts_recent = peer->tcp_ts;
213                         }
214                 }
215         }
216
217         inet->inet_dport = usin->sin_port;
218         inet->inet_daddr = daddr;
219
220         inet_csk(sk)->icsk_ext_hdr_len = 0;
221         if (inet->opt)
222                 inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;
223
224         tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
225
226         /* Socket identity is still unknown (sport may be zero).
227          * However, we set the state to SYN-SENT and, without releasing the
228          * socket lock, select a source port, enter ourselves into the hash
229          * tables and complete initialization after this.
230          */
231         tcp_set_state(sk, TCP_SYN_SENT);
232         err = inet_hash_connect(&tcp_death_row, sk);
233         if (err)
234                 goto failure;
235
236         err = ip_route_newports(&rt, IPPROTO_TCP,
237                                 inet->inet_sport, inet->inet_dport, sk);
238         if (err)
239                 goto failure;
240
241         /* OK, now commit destination to socket.  */
242         sk->sk_gso_type = SKB_GSO_TCPV4;
243         sk_setup_caps(sk, &rt->dst);
244
245         if (!tp->write_seq)
246                 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
247                                                            inet->inet_daddr,
248                                                            inet->inet_sport,
249                                                            usin->sin_port);
250
251         inet->inet_id = tp->write_seq ^ jiffies;
252
253         err = tcp_connect(sk);
254         rt = NULL;
255         if (err)
256                 goto failure;
257
258         return 0;
259
260 failure:
261         /*
262          * This unhashes the socket and releases the local port,
263          * if necessary.
264          */
265         tcp_set_state(sk, TCP_CLOSE);
266         ip_rt_put(rt);
267         sk->sk_route_caps = 0;
268         inet->inet_dport = 0;
269         return err;
270 }
271 EXPORT_SYMBOL(tcp_v4_connect);
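/*
 * Illustrative user-space counterpart (a sketch): tcp_v4_connect() above is
 * what the kernel runs when an application calls connect() on an AF_INET
 * stream socket.  The address 192.0.2.1:80 is a hypothetical example
 * (TEST-NET-1), chosen only for illustration.
 */
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct sockaddr_in dst;
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&dst, 0, sizeof(dst));
        dst.sin_family = AF_INET;               /* checked by tcp_v4_connect() */
        dst.sin_port   = htons(80);
        inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);

        if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
                perror("connect");              /* errno reflects the failure */

        close(fd);
        return 0;
}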
272
273 /*
274  * This routine does path mtu discovery as defined in RFC1191.
275  */
276 static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
277 {
278         struct dst_entry *dst;
279         struct inet_sock *inet = inet_sk(sk);
280
281         /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
282          * sent out by Linux are always < 576 bytes, so they should go through
283          * unfragmented).
284          */
285         if (sk->sk_state == TCP_LISTEN)
286                 return;
287
288         /* We don't check in the dst entry if pmtu discovery is forbidden
289          * on this route. We just assume that no packet-too-big packets
290          * are sent back when pmtu discovery is not active.
291          * There is a small race when the user changes this flag in the
292          * route, but I think that's acceptable.
293          */
294         if ((dst = __sk_dst_check(sk, 0)) == NULL)
295                 return;
296
297         dst->ops->update_pmtu(dst, mtu);
298
299         /* Something is about to go wrong... Remember the soft error
300          * in case this connection is not able to recover.
301          */
302         if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
303                 sk->sk_err_soft = EMSGSIZE;
304
305         mtu = dst_mtu(dst);
306
307         if (inet->pmtudisc != IP_PMTUDISC_DONT &&
308             inet_csk(sk)->icsk_pmtu_cookie > mtu) {
309                 tcp_sync_mss(sk, mtu);
310
311                 /* Resend the TCP packet because it's
312                  * clear that the old packet has been
313                  * dropped. This is the new "fast" path mtu
314                  * discovery.
315                  */
316                 tcp_simple_retransmit(sk);
317         } /* else let the usual retransmit timer handle it */
318 }
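/*
 * Illustrative user-space sketch: the path-MTU state that do_pmtu_discovery()
 * maintains can be observed on a connected socket via the IP_MTU_DISCOVER and
 * IP_MTU socket options (see ip(7)).  "fd" is assumed to be an
 * already-connected TCP socket; error handling is trimmed.
 */
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

static void show_path_mtu(int fd)
{
        int pmtudisc = IP_PMTUDISC_DO;          /* set DF, do PMTU discovery */
        int mtu = 0;
        socklen_t len = sizeof(mtu);

        setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &pmtudisc, sizeof(pmtudisc));

        if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) == 0)
                printf("current path MTU: %d\n", mtu);
}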
319
320 /*
321  * This routine is called by the ICMP module when it gets some
322  * sort of error condition.  If err < 0 then the socket should
323  * be closed and the error returned to the user.  If err > 0
324  * it's just the icmp type << 8 | icmp code.  After adjustment
325  * header points to the first 8 bytes of the tcp header.  We need
326  * to find the appropriate port.
327  *
328  * The locking strategy used here is very "optimistic". When
329  * someone else accesses the socket the ICMP is just dropped
330  * and for some paths there is no check at all.
331  * A more general error queue to queue errors for later handling
332  * is probably better.
333  *
334  */
335
336 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
337 {
338         struct iphdr *iph = (struct iphdr *)icmp_skb->data;
339         struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
340         struct inet_connection_sock *icsk;
341         struct tcp_sock *tp;
342         struct inet_sock *inet;
343         const int type = icmp_hdr(icmp_skb)->type;
344         const int code = icmp_hdr(icmp_skb)->code;
345         struct sock *sk;
346         struct sk_buff *skb;
347         __u32 seq;
348         __u32 remaining;
349         int err;
350         struct net *net = dev_net(icmp_skb->dev);
351
352         if (icmp_skb->len < (iph->ihl << 2) + 8) {
353                 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
354                 return;
355         }
356
357         sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
358                         iph->saddr, th->source, inet_iif(icmp_skb));
359         if (!sk) {
360                 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
361                 return;
362         }
363         if (sk->sk_state == TCP_TIME_WAIT) {
364                 inet_twsk_put(inet_twsk(sk));
365                 return;
366         }
367
368         bh_lock_sock(sk);
369         /* If too many ICMPs get dropped on busy
370          * servers this needs to be solved differently.
371          */
372         if (sock_owned_by_user(sk))
373                 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
374
375         if (sk->sk_state == TCP_CLOSE)
376                 goto out;
377
378         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
379                 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
380                 goto out;
381         }
382
383         icsk = inet_csk(sk);
384         tp = tcp_sk(sk);
385         seq = ntohl(th->seq);
386         if (sk->sk_state != TCP_LISTEN &&
387             !between(seq, tp->snd_una, tp->snd_nxt)) {
388                 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
389                 goto out;
390         }
391
392         switch (type) {
393         case ICMP_SOURCE_QUENCH:
394                 /* Just silently ignore these. */
395                 goto out;
396         case ICMP_PARAMETERPROB:
397                 err = EPROTO;
398                 break;
399         case ICMP_DEST_UNREACH:
400                 if (code > NR_ICMP_UNREACH)
401                         goto out;
402
403                 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
404                         if (!sock_owned_by_user(sk))
405                                 do_pmtu_discovery(sk, iph, info);
406                         goto out;
407                 }
408
409                 err = icmp_err_convert[code].errno;
410                 /* check if icmp_skb allows revert of backoff
411                  * (see draft-zimmermann-tcp-lcd) */
412                 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
413                         break;
414                 if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
415                     !icsk->icsk_backoff)
416                         break;
417
418                 if (sock_owned_by_user(sk))
419                         break;
420
421                 icsk->icsk_backoff--;
422                 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
423                                          icsk->icsk_backoff;
424                 tcp_bound_rto(sk);
425
426                 skb = tcp_write_queue_head(sk);
427                 BUG_ON(!skb);
428
429                 remaining = icsk->icsk_rto - min(icsk->icsk_rto,
430                                 tcp_time_stamp - TCP_SKB_CB(skb)->when);
431
432                 if (remaining) {
433                         inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
434                                                   remaining, TCP_RTO_MAX);
435                 } else {
436                         /* RTO revert clocked out retransmission.
437                          * Will retransmit now */
438                         tcp_retransmit_timer(sk);
439                 }
440
441                 break;
442         case ICMP_TIME_EXCEEDED:
443                 err = EHOSTUNREACH;
444                 break;
445         default:
446                 goto out;
447         }
448
449         switch (sk->sk_state) {
450                 struct request_sock *req, **prev;
451         case TCP_LISTEN:
452                 if (sock_owned_by_user(sk))
453                         goto out;
454
455                 req = inet_csk_search_req(sk, &prev, th->dest,
456                                           iph->daddr, iph->saddr);
457                 if (!req)
458                         goto out;
459
460                 /* ICMPs are not backlogged, hence we cannot get
461                    an established socket here.
462                  */
463                 WARN_ON(req->sk);
464
465                 if (seq != tcp_rsk(req)->snt_isn) {
466                         NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
467                         goto out;
468                 }
469
470                 /*
471                  * Still in SYN_RECV, just remove it silently.
472                  * There is no good way to pass the error to the newly
473                  * created socket, and POSIX does not want network
474                  * errors returned from accept().
475                  */
476                 inet_csk_reqsk_queue_drop(sk, req, prev);
477                 goto out;
478
479         case TCP_SYN_SENT:
480         case TCP_SYN_RECV:  /* Cannot happen normally.
481                                It can, for example, if SYNs crossed.
482                              */
483                 if (!sock_owned_by_user(sk)) {
484                         sk->sk_err = err;
485
486                         sk->sk_error_report(sk);
487
488                         tcp_done(sk);
489                 } else {
490                         sk->sk_err_soft = err;
491                 }
492                 goto out;
493         }
494
495         /* If we've already connected we will keep trying
496          * until we time out, or the user gives up.
497          *
498          * rfc1122 4.2.3.9 allows considering as hard errors
499          * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
500          * but it is obsoleted by pmtu discovery).
501          *
502          * Note that in the modern internet, where routing is unreliable
503          * and broken firewalls sit in every dark corner, sending random
504          * errors ordered by their masters, even these two messages finally
505          * lose their original sense (even Linux sends invalid PORT_UNREACHs).
506          *
507          * Now we are in compliance with RFCs.
508          *                                                      --ANK (980905)
509          */
510
511         inet = inet_sk(sk);
512         if (!sock_owned_by_user(sk) && inet->recverr) {
513                 sk->sk_err = err;
514                 sk->sk_error_report(sk);
515         } else  { /* Only an error on timeout */
516                 sk->sk_err_soft = err;
517         }
518
519 out:
520         bh_unlock_sock(sk);
521         sock_put(sk);
522 }
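/*
 * Illustrative user-space sketch: an error that tcp_v4_err() records in
 * sk->sk_err is typically surfaced to the application by a failing
 * read()/write() or, e.g. after a non-blocking connect(), via
 * getsockopt(SO_ERROR) as below.  "fd" is assumed to be an existing TCP
 * socket.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static void report_socket_error(int fd)
{
        int err = 0;
        socklen_t len = sizeof(err);

        if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) == 0 && err)
                fprintf(stderr, "pending socket error: %s\n", strerror(err));
}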
523
524 static void __tcp_v4_send_check(struct sk_buff *skb,
525                                 __be32 saddr, __be32 daddr)
526 {
527         struct tcphdr *th = tcp_hdr(skb);
528
529         if (skb->ip_summed == CHECKSUM_PARTIAL) {
530                 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
531                 skb->csum_start = skb_transport_header(skb) - skb->head;
532                 skb->csum_offset = offsetof(struct tcphdr, check);
533         } else {
534                 th->check = tcp_v4_check(skb->len, saddr, daddr,
535                                          csum_partial(th,
536                                                       th->doff << 2,
537                                                       skb->csum));
538         }
539 }
540
541 /* This routine computes an IPv4 TCP checksum. */
542 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
543 {
544         struct inet_sock *inet = inet_sk(sk);
545
546         __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
547 }
548 EXPORT_SYMBOL(tcp_v4_send_check);
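/*
 * Illustrative user-space sketch: the value being set up above is the
 * standard TCP checksum, i.e. the 16-bit one's-complement sum over a
 * pseudo-header (saddr, daddr, zero, protocol, TCP length) followed by the
 * TCP header and payload (RFC 793 / RFC 1071).  This is a plain
 * reimplementation for reference, not the kernel's csum helpers.
 */
#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static uint16_t csum_fold(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

static uint32_t csum_add(uint32_t sum, const void *data, size_t len)
{
        const uint8_t *p = data;

        while (len > 1) {
                sum += (uint32_t)p[0] << 8 | p[1];      /* 16-bit big-endian words */
                p += 2;
                len -= 2;
        }
        if (len)
                sum += (uint32_t)p[0] << 8;             /* pad the odd byte with zero */
        return sum;
}

/* saddr/daddr in network byte order; seg points at the TCP header + payload
 * with the checksum field already zeroed.  Returns the value to store in
 * th->check (network byte order). */
static uint16_t tcp4_checksum(uint32_t saddr, uint32_t daddr,
                              const void *seg, uint16_t seglen)
{
        uint8_t pseudo[12];
        uint32_t sum;

        memcpy(&pseudo[0], &saddr, 4);
        memcpy(&pseudo[4], &daddr, 4);
        pseudo[8]  = 0;
        pseudo[9]  = 6;                                 /* IPPROTO_TCP */
        pseudo[10] = seglen >> 8;
        pseudo[11] = seglen & 0xff;

        sum = csum_add(0, pseudo, sizeof(pseudo));
        sum = csum_add(sum, seg, seglen);
        return htons(csum_fold(sum));
}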
549
550 int tcp_v4_gso_send_check(struct sk_buff *skb)
551 {
552         const struct iphdr *iph;
553         struct tcphdr *th;
554
555         if (!pskb_may_pull(skb, sizeof(*th)))
556                 return -EINVAL;
557
558         iph = ip_hdr(skb);
559         th = tcp_hdr(skb);
560
561         th->check = 0;
562         skb->ip_summed = CHECKSUM_PARTIAL;
563         __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
564         return 0;
565 }
566
567 /*
568  *      This routine will send an RST to the other tcp.
569  *
570  *      Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
571  *                    for the reset?
572  *      Answer: if a packet caused an RST, it is not for a socket
573  *              existing in our system; if it is matched to a socket,
574  *              it is just a duplicate segment or a bug in the other side's TCP.
575  *              So we build the reply based only on parameters that
576  *              arrived with the segment.
577  *      Exception: precedence violation. We do not implement it in any case.
578  */
579
580 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
581 {
582         struct tcphdr *th = tcp_hdr(skb);
583         struct {
584                 struct tcphdr th;
585 #ifdef CONFIG_TCP_MD5SIG
586                 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
587 #endif
588         } rep;
589         struct ip_reply_arg arg;
590 #ifdef CONFIG_TCP_MD5SIG
591         struct tcp_md5sig_key *key;
592 #endif
593         struct net *net;
594
595         /* Never send a reset in response to a reset. */
596         if (th->rst)
597                 return;
598
599         if (skb_rtable(skb)->rt_type != RTN_LOCAL)
600                 return;
601
602         /* Swap the send and the receive. */
603         memset(&rep, 0, sizeof(rep));
604         rep.th.dest   = th->source;
605         rep.th.source = th->dest;
606         rep.th.doff   = sizeof(struct tcphdr) / 4;
607         rep.th.rst    = 1;
608
609         if (th->ack) {
610                 rep.th.seq = th->ack_seq;
611         } else {
612                 rep.th.ack = 1;
613                 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
614                                        skb->len - (th->doff << 2));
615         }
616
617         memset(&arg, 0, sizeof(arg));
618         arg.iov[0].iov_base = (unsigned char *)&rep;
619         arg.iov[0].iov_len  = sizeof(rep.th);
620
621 #ifdef CONFIG_TCP_MD5SIG
622         key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
623         if (key) {
624                 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
625                                    (TCPOPT_NOP << 16) |
626                                    (TCPOPT_MD5SIG << 8) |
627                                    TCPOLEN_MD5SIG);
628                 /* Update length and the length the header thinks exists */
629                 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
630                 rep.th.doff = arg.iov[0].iov_len / 4;
631
632                 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
633                                      key, ip_hdr(skb)->saddr,
634                                      ip_hdr(skb)->daddr, &rep.th);
635         }
636 #endif
637         arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
638                                       ip_hdr(skb)->saddr, /* XXX */
639                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
640         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
641         arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
642
643         net = dev_net(skb_dst(skb)->dev);
644         ip_send_reply(net->ipv4.tcp_sock, skb,
645                       &arg, arg.iov[0].iov_len);
646
647         TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
648         TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
649 }
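/*
 * Illustrative user-space sketch: one easy way to observe
 * tcp_v4_send_reset() in action is to connect to a local port with no
 * listener; the SYN is answered with an RST, which the caller sees as
 * ECONNREFUSED.  Port 9 on 127.0.0.1 is assumed to have no listener.
 */
#include <arpa/inet.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct sockaddr_in dst;
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        memset(&dst, 0, sizeof(dst));
        dst.sin_family = AF_INET;
        dst.sin_port   = htons(9);              /* usually nothing listens here */
        inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);

        if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0 &&
            errno == ECONNREFUSED)
                printf("SYN was answered with an RST (ECONNREFUSED)\n");

        close(fd);
        return 0;
}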
650
651 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
652    outside socket context, is certainly ugly. What can I do?
653  */
654
655 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
656                             u32 win, u32 ts, int oif,
657                             struct tcp_md5sig_key *key,
658                             int reply_flags)
659 {
660         struct tcphdr *th = tcp_hdr(skb);
661         struct {
662                 struct tcphdr th;
663                 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
664 #ifdef CONFIG_TCP_MD5SIG
665                            + (TCPOLEN_MD5SIG_ALIGNED >> 2)
666 #endif
667                         ];
668         } rep;
669         struct ip_reply_arg arg;
670         struct net *net = dev_net(skb_dst(skb)->dev);
671
672         memset(&rep.th, 0, sizeof(struct tcphdr));
673         memset(&arg, 0, sizeof(arg));
674
675         arg.iov[0].iov_base = (unsigned char *)&rep;
676         arg.iov[0].iov_len  = sizeof(rep.th);
677         if (ts) {
678                 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
679                                    (TCPOPT_TIMESTAMP << 8) |
680                                    TCPOLEN_TIMESTAMP);
681                 rep.opt[1] = htonl(tcp_time_stamp);
682                 rep.opt[2] = htonl(ts);
683                 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
684         }
685
686         /* Swap the send and the receive. */
687         rep.th.dest    = th->source;
688         rep.th.source  = th->dest;
689         rep.th.doff    = arg.iov[0].iov_len / 4;
690         rep.th.seq     = htonl(seq);
691         rep.th.ack_seq = htonl(ack);
692         rep.th.ack     = 1;
693         rep.th.window  = htons(win);
694
695 #ifdef CONFIG_TCP_MD5SIG
696         if (key) {
697                 int offset = (ts) ? 3 : 0;
698
699                 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
700                                           (TCPOPT_NOP << 16) |
701                                           (TCPOPT_MD5SIG << 8) |
702                                           TCPOLEN_MD5SIG);
703                 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
704                 rep.th.doff = arg.iov[0].iov_len/4;
705
706                 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
707                                     key, ip_hdr(skb)->saddr,
708                                     ip_hdr(skb)->daddr, &rep.th);
709         }
710 #endif
711         arg.flags = reply_flags;
712         arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
713                                       ip_hdr(skb)->saddr, /* XXX */
714                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
715         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
716         if (oif)
717                 arg.bound_dev_if = oif;
718
719         ip_send_reply(net->ipv4.tcp_sock, skb,
720                       &arg, arg.iov[0].iov_len);
721
722         TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
723 }
724
725 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
726 {
727         struct inet_timewait_sock *tw = inet_twsk(sk);
728         struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
729
730         tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
731                         tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
732                         tcptw->tw_ts_recent,
733                         tw->tw_bound_dev_if,
734                         tcp_twsk_md5_key(tcptw),
735                         tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
736                         );
737
738         inet_twsk_put(tw);
739 }
740
741 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
742                                   struct request_sock *req)
743 {
744         tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
745                         tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
746                         req->ts_recent,
747                         0,
748                         tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
749                         inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
750 }
751
752 /*
753  *      Send a SYN-ACK after having received a SYN.
754  *      This still operates on a request_sock only, not on a big
755  *      socket.
756  */
757 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
758                               struct request_sock *req,
759                               struct request_values *rvp)
760 {
761         const struct inet_request_sock *ireq = inet_rsk(req);
762         int err = -1;
763         struct sk_buff * skb;
764
765         /* First, grab a route. */
766         if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
767                 return -1;
768
769         skb = tcp_make_synack(sk, dst, req, rvp);
770
771         if (skb) {
772                 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
773
774                 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
775                                             ireq->rmt_addr,
776                                             ireq->opt);
777                 err = net_xmit_eval(err);
778         }
779
780         dst_release(dst);
781         return err;
782 }
783
784 static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
785                               struct request_values *rvp)
786 {
787         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
788         return tcp_v4_send_synack(sk, NULL, req, rvp);
789 }
790
791 /*
792  *      IPv4 request_sock destructor.
793  */
794 static void tcp_v4_reqsk_destructor(struct request_sock *req)
795 {
796         kfree(inet_rsk(req)->opt);
797 }
798
799 static void syn_flood_warning(const struct sk_buff *skb)
800 {
801         const char *msg;
802
803 #ifdef CONFIG_SYN_COOKIES
804         if (sysctl_tcp_syncookies)
805                 msg = "Sending cookies";
806         else
807 #endif
808                 msg = "Dropping request";
809
810         pr_info("TCP: Possible SYN flooding on port %d. %s.\n",
811                                 ntohs(tcp_hdr(skb)->dest), msg);
812 }
813
814 /*
815  * Save and compile IPv4 options into the request_sock if needed.
816  */
817 static struct ip_options *tcp_v4_save_options(struct sock *sk,
818                                               struct sk_buff *skb)
819 {
820         struct ip_options *opt = &(IPCB(skb)->opt);
821         struct ip_options *dopt = NULL;
822
823         if (opt && opt->optlen) {
824                 int opt_size = optlength(opt);
825                 dopt = kmalloc(opt_size, GFP_ATOMIC);
826                 if (dopt) {
827                         if (ip_options_echo(dopt, skb)) {
828                                 kfree(dopt);
829                                 dopt = NULL;
830                         }
831                 }
832         }
833         return dopt;
834 }
835
836 #ifdef CONFIG_TCP_MD5SIG
837 /*
838  * RFC2385 MD5 checksumming requires a mapping of
839  * IP address->MD5 Key.
840  * We need to maintain these in the sk structure.
841  */
842
843 /* Find the Key structure for an address.  */
844 static struct tcp_md5sig_key *
845                         tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
846 {
847         struct tcp_sock *tp = tcp_sk(sk);
848         int i;
849
850         if (!tp->md5sig_info || !tp->md5sig_info->entries4)
851                 return NULL;
852         for (i = 0; i < tp->md5sig_info->entries4; i++) {
853                 if (tp->md5sig_info->keys4[i].addr == addr)
854                         return &tp->md5sig_info->keys4[i].base;
855         }
856         return NULL;
857 }
858
859 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
860                                          struct sock *addr_sk)
861 {
862         return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr);
863 }
864 EXPORT_SYMBOL(tcp_v4_md5_lookup);
865
866 static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
867                                                       struct request_sock *req)
868 {
869         return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
870 }
871
872 /* This can be called on a newly created socket, from other files */
873 int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
874                       u8 *newkey, u8 newkeylen)
875 {
876         /* Add Key to the list */
877         struct tcp_md5sig_key *key;
878         struct tcp_sock *tp = tcp_sk(sk);
879         struct tcp4_md5sig_key *keys;
880
881         key = tcp_v4_md5_do_lookup(sk, addr);
882         if (key) {
883                 /* Pre-existing entry - just update that one. */
884                 kfree(key->key);
885                 key->key = newkey;
886                 key->keylen = newkeylen;
887         } else {
888                 struct tcp_md5sig_info *md5sig;
889
890                 if (!tp->md5sig_info) {
891                         tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
892                                                   GFP_ATOMIC);
893                         if (!tp->md5sig_info) {
894                                 kfree(newkey);
895                                 return -ENOMEM;
896                         }
897                         sk_nocaps_add(sk, NETIF_F_GSO_MASK);
898                 }
899                 if (tcp_alloc_md5sig_pool(sk) == NULL) {
900                         kfree(newkey);
901                         return -ENOMEM;
902                 }
903                 md5sig = tp->md5sig_info;
904
905                 if (md5sig->alloced4 == md5sig->entries4) {
906                         keys = kmalloc((sizeof(*keys) *
907                                         (md5sig->entries4 + 1)), GFP_ATOMIC);
908                         if (!keys) {
909                                 kfree(newkey);
910                                 tcp_free_md5sig_pool();
911                                 return -ENOMEM;
912                         }
913
914                         if (md5sig->entries4)
915                                 memcpy(keys, md5sig->keys4,
916                                        sizeof(*keys) * md5sig->entries4);
917
918                         /* Free old key list, and reference new one */
919                         kfree(md5sig->keys4);
920                         md5sig->keys4 = keys;
921                         md5sig->alloced4++;
922                 }
923                 md5sig->entries4++;
924                 md5sig->keys4[md5sig->entries4 - 1].addr        = addr;
925                 md5sig->keys4[md5sig->entries4 - 1].base.key    = newkey;
926                 md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
927         }
928         return 0;
929 }
930 EXPORT_SYMBOL(tcp_v4_md5_do_add);
931
932 static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
933                                u8 *newkey, u8 newkeylen)
934 {
935         return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr,
936                                  newkey, newkeylen);
937 }
938
939 int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
940 {
941         struct tcp_sock *tp = tcp_sk(sk);
942         int i;
943
944         for (i = 0; i < tp->md5sig_info->entries4; i++) {
945                 if (tp->md5sig_info->keys4[i].addr == addr) {
946                         /* Free the key */
947                         kfree(tp->md5sig_info->keys4[i].base.key);
948                         tp->md5sig_info->entries4--;
949
950                         if (tp->md5sig_info->entries4 == 0) {
951                                 kfree(tp->md5sig_info->keys4);
952                                 tp->md5sig_info->keys4 = NULL;
953                                 tp->md5sig_info->alloced4 = 0;
954                         } else if (tp->md5sig_info->entries4 != i) {
955                                 /* Need to do some manipulation */
956                                 memmove(&tp->md5sig_info->keys4[i],
957                                         &tp->md5sig_info->keys4[i+1],
958                                         (tp->md5sig_info->entries4 - i) *
959                                          sizeof(struct tcp4_md5sig_key));
960                         }
961                         tcp_free_md5sig_pool();
962                         return 0;
963                 }
964         }
965         return -ENOENT;
966 }
967 EXPORT_SYMBOL(tcp_v4_md5_do_del);
968
969 static void tcp_v4_clear_md5_list(struct sock *sk)
970 {
971         struct tcp_sock *tp = tcp_sk(sk);
972
973         /* Free each key, then the set of keys,
974          * the crypto element, and then decrement our
975          * hold on the last resort crypto.
976          */
977         if (tp->md5sig_info->entries4) {
978                 int i;
979                 for (i = 0; i < tp->md5sig_info->entries4; i++)
980                         kfree(tp->md5sig_info->keys4[i].base.key);
981                 tp->md5sig_info->entries4 = 0;
982                 tcp_free_md5sig_pool();
983         }
984         if (tp->md5sig_info->keys4) {
985                 kfree(tp->md5sig_info->keys4);
986                 tp->md5sig_info->keys4 = NULL;
987                 tp->md5sig_info->alloced4  = 0;
988         }
989 }
990
991 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
992                                  int optlen)
993 {
994         struct tcp_md5sig cmd;
995         struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
996         u8 *newkey;
997
998         if (optlen < sizeof(cmd))
999                 return -EINVAL;
1000
1001         if (copy_from_user(&cmd, optval, sizeof(cmd)))
1002                 return -EFAULT;
1003
1004         if (sin->sin_family != AF_INET)
1005                 return -EINVAL;
1006
1007         if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
1008                 if (!tcp_sk(sk)->md5sig_info)
1009                         return -ENOENT;
1010                 return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
1011         }
1012
1013         if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1014                 return -EINVAL;
1015
1016         if (!tcp_sk(sk)->md5sig_info) {
1017                 struct tcp_sock *tp = tcp_sk(sk);
1018                 struct tcp_md5sig_info *p;
1019
1020                 p = kzalloc(sizeof(*p), sk->sk_allocation);
1021                 if (!p)
1022                         return -EINVAL;
1023
1024                 tp->md5sig_info = p;
1025                 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1026         }
1027
1028         newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
1029         if (!newkey)
1030                 return -ENOMEM;
1031         return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
1032                                  newkey, cmd.tcpm_keylen);
1033 }
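/*
 * Illustrative user-space sketch: the option parsed above is set from user
 * space with setsockopt(TCP_MD5SIG) and struct tcp_md5sig from
 * <linux/tcp.h>, as BGP daemons do for RFC 2385 protection.  The peer
 * address and key below are hypothetical, and "fd" is assumed to be a TCP
 * socket (listening or connected).
 */
#include <arpa/inet.h>
#include <linux/tcp.h>
#include <string.h>
#include <sys/socket.h>

static int set_md5_key(int fd, const char *peer_ip, const char *key)
{
        struct tcp_md5sig md5;
        struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;

        memset(&md5, 0, sizeof(md5));
        sin->sin_family = AF_INET;
        inet_pton(AF_INET, peer_ip, &sin->sin_addr);

        md5.tcpm_keylen = strlen(key);          /* must be <= TCP_MD5SIG_MAXKEYLEN */
        memcpy(md5.tcpm_key, key, md5.tcpm_keylen);

        return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}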
1034
1035 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1036                                         __be32 daddr, __be32 saddr, int nbytes)
1037 {
1038         struct tcp4_pseudohdr *bp;
1039         struct scatterlist sg;
1040
1041         bp = &hp->md5_blk.ip4;
1042
1043         /*
1044          * 1. the TCP pseudo-header (in the order: source IP address,
1045          * destination IP address, zero-padded protocol number, and
1046          * segment length)
1047          */
1048         bp->saddr = saddr;
1049         bp->daddr = daddr;
1050         bp->pad = 0;
1051         bp->protocol = IPPROTO_TCP;
1052         bp->len = cpu_to_be16(nbytes);
1053
1054         sg_init_one(&sg, bp, sizeof(*bp));
1055         return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1056 }
1057
1058 static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
1059                                __be32 daddr, __be32 saddr, struct tcphdr *th)
1060 {
1061         struct tcp_md5sig_pool *hp;
1062         struct hash_desc *desc;
1063
1064         hp = tcp_get_md5sig_pool();
1065         if (!hp)
1066                 goto clear_hash_noput;
1067         desc = &hp->md5_desc;
1068
1069         if (crypto_hash_init(desc))
1070                 goto clear_hash;
1071         if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1072                 goto clear_hash;
1073         if (tcp_md5_hash_header(hp, th))
1074                 goto clear_hash;
1075         if (tcp_md5_hash_key(hp, key))
1076                 goto clear_hash;
1077         if (crypto_hash_final(desc, md5_hash))
1078                 goto clear_hash;
1079
1080         tcp_put_md5sig_pool();
1081         return 0;
1082
1083 clear_hash:
1084         tcp_put_md5sig_pool();
1085 clear_hash_noput:
1086         memset(md5_hash, 0, 16);
1087         return 1;
1088 }
1089
1090 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1091                         struct sock *sk, struct request_sock *req,
1092                         struct sk_buff *skb)
1093 {
1094         struct tcp_md5sig_pool *hp;
1095         struct hash_desc *desc;
1096         struct tcphdr *th = tcp_hdr(skb);
1097         __be32 saddr, daddr;
1098
1099         if (sk) {
1100                 saddr = inet_sk(sk)->inet_saddr;
1101                 daddr = inet_sk(sk)->inet_daddr;
1102         } else if (req) {
1103                 saddr = inet_rsk(req)->loc_addr;
1104                 daddr = inet_rsk(req)->rmt_addr;
1105         } else {
1106                 const struct iphdr *iph = ip_hdr(skb);
1107                 saddr = iph->saddr;
1108                 daddr = iph->daddr;
1109         }
1110
1111         hp = tcp_get_md5sig_pool();
1112         if (!hp)
1113                 goto clear_hash_noput;
1114         desc = &hp->md5_desc;
1115
1116         if (crypto_hash_init(desc))
1117                 goto clear_hash;
1118
1119         if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1120                 goto clear_hash;
1121         if (tcp_md5_hash_header(hp, th))
1122                 goto clear_hash;
1123         if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1124                 goto clear_hash;
1125         if (tcp_md5_hash_key(hp, key))
1126                 goto clear_hash;
1127         if (crypto_hash_final(desc, md5_hash))
1128                 goto clear_hash;
1129
1130         tcp_put_md5sig_pool();
1131         return 0;
1132
1133 clear_hash:
1134         tcp_put_md5sig_pool();
1135 clear_hash_noput:
1136         memset(md5_hash, 0, 16);
1137         return 1;
1138 }
1139 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1140
1141 static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
1142 {
1143         /*
1144          * This gets called for each TCP segment that arrives,
1145          * so we want to be efficient.
1146          * We have 3 drop cases:
1147          * o No MD5 hash and one expected.
1148          * o MD5 hash and we're not expecting one.
1149          * o MD5 hash and it's wrong.
1150          */
1151         __u8 *hash_location = NULL;
1152         struct tcp_md5sig_key *hash_expected;
1153         const struct iphdr *iph = ip_hdr(skb);
1154         struct tcphdr *th = tcp_hdr(skb);
1155         int genhash;
1156         unsigned char newhash[16];
1157
1158         hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
1159         hash_location = tcp_parse_md5sig_option(th);
1160
1161         /* We've parsed the options - do we have a hash? */
1162         if (!hash_expected && !hash_location)
1163                 return 0;
1164
1165         if (hash_expected && !hash_location) {
1166                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1167                 return 1;
1168         }
1169
1170         if (!hash_expected && hash_location) {
1171                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1172                 return 1;
1173         }
1174
1175         /* Okay, so this is hash_expected and hash_location -
1176          * so we need to calculate the checksum.
1177          */
1178         genhash = tcp_v4_md5_hash_skb(newhash,
1179                                       hash_expected,
1180                                       NULL, NULL, skb);
1181
1182         if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1183                 if (net_ratelimit()) {
1184                         printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1185                                &iph->saddr, ntohs(th->source),
1186                                &iph->daddr, ntohs(th->dest),
1187                                genhash ? " tcp_v4_calc_md5_hash failed" : "");
1188                 }
1189                 return 1;
1190         }
1191         return 0;
1192 }
1193
1194 #endif
1195
1196 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1197         .family         =       PF_INET,
1198         .obj_size       =       sizeof(struct tcp_request_sock),
1199         .rtx_syn_ack    =       tcp_v4_rtx_synack,
1200         .send_ack       =       tcp_v4_reqsk_send_ack,
1201         .destructor     =       tcp_v4_reqsk_destructor,
1202         .send_reset     =       tcp_v4_send_reset,
1203         .syn_ack_timeout =      tcp_syn_ack_timeout,
1204 };
1205
1206 #ifdef CONFIG_TCP_MD5SIG
1207 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1208         .md5_lookup     =       tcp_v4_reqsk_md5_lookup,
1209         .calc_md5_hash  =       tcp_v4_md5_hash_skb,
1210 };
1211 #endif
1212
1213 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1214         .twsk_obj_size  = sizeof(struct tcp_timewait_sock),
1215         .twsk_unique    = tcp_twsk_unique,
1216         .twsk_destructor= tcp_twsk_destructor,
1217 };
1218
1219 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1220 {
1221         struct tcp_extend_values tmp_ext;
1222         struct tcp_options_received tmp_opt;
1223         u8 *hash_location;
1224         struct request_sock *req;
1225         struct inet_request_sock *ireq;
1226         struct tcp_sock *tp = tcp_sk(sk);
1227         struct dst_entry *dst = NULL;
1228         __be32 saddr = ip_hdr(skb)->saddr;
1229         __be32 daddr = ip_hdr(skb)->daddr;
1230         __u32 isn = TCP_SKB_CB(skb)->when;
1231 #ifdef CONFIG_SYN_COOKIES
1232         int want_cookie = 0;
1233 #else
1234 #define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
1235 #endif
1236
1237         /* Never answer SYNs sent to broadcast or multicast */
1238         if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1239                 goto drop;
1240
1241         /* TW buckets are converted to open requests without
1242          * limitation; they conserve resources, and the peer is
1243          * evidently a real one.
1244          */
1245         if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1246                 if (net_ratelimit())
1247                         syn_flood_warning(skb);
1248 #ifdef CONFIG_SYN_COOKIES
1249                 if (sysctl_tcp_syncookies) {
1250                         want_cookie = 1;
1251                 } else
1252 #endif
1253                 goto drop;
1254         }
1255
1256         /* The accept backlog is full. If we have already queued enough
1257          * warm entries in the syn queue, drop the request. This is better
1258          * than clogging the syn queue with openreqs with exponentially
1259          * increasing timeouts.
1260          */
1261         if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1262                 goto drop;
1263
1264         req = inet_reqsk_alloc(&tcp_request_sock_ops);
1265         if (!req)
1266                 goto drop;
1267
1268 #ifdef CONFIG_TCP_MD5SIG
1269         tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1270 #endif
1271
1272         tcp_clear_options(&tmp_opt);
1273         tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1274         tmp_opt.user_mss  = tp->rx_opt.user_mss;
1275         tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1276
1277         if (tmp_opt.cookie_plus > 0 &&
1278             tmp_opt.saw_tstamp &&
1279             !tp->rx_opt.cookie_out_never &&
1280             (sysctl_tcp_cookie_size > 0 ||
1281              (tp->cookie_values != NULL &&
1282               tp->cookie_values->cookie_desired > 0))) {
1283                 u8 *c;
1284                 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1285                 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1286
1287                 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1288                         goto drop_and_release;
1289
1290                 /* Secret recipe starts with IP addresses */
1291                 *mess++ ^= (__force u32)daddr;
1292                 *mess++ ^= (__force u32)saddr;
1293
1294                 /* plus variable length Initiator Cookie */
1295                 c = (u8 *)mess;
1296                 while (l-- > 0)
1297                         *c++ ^= *hash_location++;
1298
1299 #ifdef CONFIG_SYN_COOKIES
1300                 want_cookie = 0;        /* not our kind of cookie */
1301 #endif
1302                 tmp_ext.cookie_out_never = 0; /* false */
1303                 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1304         } else if (!tp->rx_opt.cookie_in_always) {
1305                 /* redundant indications, but ensure initialization. */
1306                 tmp_ext.cookie_out_never = 1; /* true */
1307                 tmp_ext.cookie_plus = 0;
1308         } else {
1309                 goto drop_and_release;
1310         }
1311         tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1312
1313         if (want_cookie && !tmp_opt.saw_tstamp)
1314                 tcp_clear_options(&tmp_opt);
1315
1316         tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1317         tcp_openreq_init(req, &tmp_opt, skb);
1318
1319         ireq = inet_rsk(req);
1320         ireq->loc_addr = daddr;
1321         ireq->rmt_addr = saddr;
1322         ireq->no_srccheck = inet_sk(sk)->transparent;
1323         ireq->opt = tcp_v4_save_options(sk, skb);
1324
1325         if (security_inet_conn_request(sk, skb, req))
1326                 goto drop_and_free;
1327
1328         if (!want_cookie || tmp_opt.tstamp_ok)
1329                 TCP_ECN_create_request(req, tcp_hdr(skb));
1330
1331         if (want_cookie) {
1332                 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1333                 req->cookie_ts = tmp_opt.tstamp_ok;
1334         } else if (!isn) {
1335                 struct inet_peer *peer = NULL;
1336
1337                 /* VJ's idea. We save the last timestamp seen
1338                  * from the destination in the peer table when entering
1339                  * TIME-WAIT state, and check against it before
1340                  * accepting a new connection request.
1341                  *
1342                  * If "isn" is not zero, this request hit an alive
1343                  * timewait bucket, so all the necessary checks
1344                  * are made in the function processing the timewait state.
1345                  */
1346                 if (tmp_opt.saw_tstamp &&
1347                     tcp_death_row.sysctl_tw_recycle &&
1348                     (dst = inet_csk_route_req(sk, req)) != NULL &&
1349                     (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1350                     peer->v4daddr == saddr) {
1351                         inet_peer_refcheck(peer);
1352                         if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1353                             (s32)(peer->tcp_ts - req->ts_recent) >
1354                                                         TCP_PAWS_WINDOW) {
1355                                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1356                                 goto drop_and_release;
1357                         }
1358                 }
1359                 /* Kill the following clause, if you dislike this way. */
1360                 else if (!sysctl_tcp_syncookies &&
1361                          (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1362                           (sysctl_max_syn_backlog >> 2)) &&
1363                          (!peer || !peer->tcp_ts_stamp) &&
1364                          (!dst || !dst_metric(dst, RTAX_RTT))) {
1365                         /* Without syncookies, the last quarter of
1366                          * the backlog is filled with destinations
1367                          * proven to be alive.
1368                          * It means that we continue to communicate
1369                          * with destinations already remembered
1370                          * at the moment of the synflood.
1371                          */
1372                         LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
1373                                        &saddr, ntohs(tcp_hdr(skb)->source));
1374                         goto drop_and_release;
1375                 }
1376
1377                 isn = tcp_v4_init_sequence(skb);
1378         }
1379         tcp_rsk(req)->snt_isn = isn;
1380
1381         if (tcp_v4_send_synack(sk, dst, req,
1382                                (struct request_values *)&tmp_ext) ||
1383             want_cookie)
1384                 goto drop_and_free;
1385
1386         inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1387         return 0;
1388
1389 drop_and_release:
1390         dst_release(dst);
1391 drop_and_free:
1392         reqsk_free(req);
1393 drop:
1394         return 0;
1395 }
1396 EXPORT_SYMBOL(tcp_v4_conn_request);
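/*
 * Illustrative user-space sketch: tcp_v4_conn_request() above runs for every
 * SYN arriving on a listening socket.  The backlog given to listen() bounds
 * the queues it checks (inet_csk_reqsk_queue_is_full(), sk_acceptq_is_full());
 * once the SYN queue fills up, the syncookie/drop logic above takes over.
 * Port 8080 is an arbitrary example.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct sockaddr_in addr;
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        memset(&addr, 0, sizeof(addr));
        addr.sin_family      = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port        = htons(8080);

        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
            listen(fd, 128) < 0) {              /* 128-entry accept backlog */
                perror("listen setup");
                return 1;
        }

        pause();                /* incoming SYNs now hit tcp_v4_conn_request() */
        return 0;
}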
1397
1398
1399 /*
1400  * The three way handshake has completed - we got a valid synack -
1401  * now create the new socket.
1402  */
1403 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1404                                   struct request_sock *req,
1405                                   struct dst_entry *dst)
1406 {
1407         struct inet_request_sock *ireq;
1408         struct inet_sock *newinet;
1409         struct tcp_sock *newtp;
1410         struct sock *newsk;
1411 #ifdef CONFIG_TCP_MD5SIG
1412         struct tcp_md5sig_key *key;
1413 #endif
1414
1415         if (sk_acceptq_is_full(sk))
1416                 goto exit_overflow;
1417
1418         if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
1419                 goto exit;
1420
1421         newsk = tcp_create_openreq_child(sk, req, skb);
1422         if (!newsk)
1423                 goto exit;
1424
1425         newsk->sk_gso_type = SKB_GSO_TCPV4;
1426         sk_setup_caps(newsk, dst);
1427
1428         newtp                 = tcp_sk(newsk);
1429         newinet               = inet_sk(newsk);
1430         ireq                  = inet_rsk(req);
1431         newinet->inet_daddr   = ireq->rmt_addr;
1432         newinet->inet_rcv_saddr = ireq->loc_addr;
1433         newinet->inet_saddr           = ireq->loc_addr;
1434         newinet->opt          = ireq->opt;
1435         ireq->opt             = NULL;
1436         newinet->mc_index     = inet_iif(skb);
1437         newinet->mc_ttl       = ip_hdr(skb)->ttl;
1438         inet_csk(newsk)->icsk_ext_hdr_len = 0;
1439         if (newinet->opt)
1440                 inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
1441         newinet->inet_id = newtp->write_seq ^ jiffies;
1442
1443         tcp_mtup_init(newsk);
1444         tcp_sync_mss(newsk, dst_mtu(dst));
1445         newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1446         if (tcp_sk(sk)->rx_opt.user_mss &&
1447             tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1448                 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1449
1450         tcp_initialize_rcv_mss(newsk);
1451
1452 #ifdef CONFIG_TCP_MD5SIG
1453         /* Copy over the MD5 key from the original socket */
1454         key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr);
1455         if (key != NULL) {
1456                 /*
1457                  * We're using one, so create a matching key
1458                  * on the newsk structure. If we fail to get
1459                  * memory, then we end up not copying the key
1460                  * across. Shucks.
1461                  */
1462                 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1463                 if (newkey != NULL)
1464                         tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
1465                                           newkey, key->keylen);
1466                 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1467         }
1468 #endif
1469
1470         __inet_hash_nolisten(newsk, NULL);
1471         __inet_inherit_port(sk, newsk);
1472
1473         return newsk;
1474
1475 exit_overflow:
1476         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1477 exit:
1478         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1479         dst_release(dst);
1480         return NULL;
1481 }
1482 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1483
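/* The helper below resolves an incoming segment on a listener to one of three
 * things: a pending open request in the SYN table (handed to tcp_check_req()),
 * an already established socket for the same four-tuple (returned with its
 * bottom-half lock held, or discarded if it is in TIME_WAIT), or, failing
 * both, a possible syncookie ACK checked via cookie_v4_check() when
 * CONFIG_SYN_COOKIES is enabled; otherwise the listening socket itself is
 * returned unchanged.
 */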
1484 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1485 {
1486         struct tcphdr *th = tcp_hdr(skb);
1487         const struct iphdr *iph = ip_hdr(skb);
1488         struct sock *nsk;
1489         struct request_sock **prev;
1490         /* Find possible connection requests. */
1491         struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1492                                                        iph->saddr, iph->daddr);
1493         if (req)
1494                 return tcp_check_req(sk, skb, req, prev);
1495
1496         nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1497                         th->source, iph->daddr, th->dest, inet_iif(skb));
1498
1499         if (nsk) {
1500                 if (nsk->sk_state != TCP_TIME_WAIT) {
1501                         bh_lock_sock(nsk);
1502                         return nsk;
1503                 }
1504                 inet_twsk_put(inet_twsk(nsk));
1505                 return NULL;
1506         }
1507
1508 #ifdef CONFIG_SYN_COOKIES
1509         if (!th->syn)
1510                 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1511 #endif
1512         return sk;
1513 }
1514
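/* The checksum helper below has three outcomes: a CHECKSUM_COMPLETE skb whose
 * hardware sum verifies against the pseudo-header is promoted to
 * CHECKSUM_UNNECESSARY; short packets (76 bytes or less) are verified in full
 * right away with __skb_checksum_complete(); for anything larger, skb->csum is
 * merely seeded with the pseudo-header sum and full verification is deferred
 * until the payload is checksummed later (e.g. by tcp_checksum_complete()).
 */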
1515 static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1516 {
1517         const struct iphdr *iph = ip_hdr(skb);
1518
1519         if (skb->ip_summed == CHECKSUM_COMPLETE) {
1520                 if (!tcp_v4_check(skb->len, iph->saddr,
1521                                   iph->daddr, skb->csum)) {
1522                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1523                         return 0;
1524                 }
1525         }
1526
1527         skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1528                                        skb->len, IPPROTO_TCP, 0);
1529
1530         if (skb->len <= 76) {
1531                 return __skb_checksum_complete(skb);
1532         }
1533         return 0;
1534 }
1535
1536
1537 /* The socket must have its spinlock held when we get
1538  * here.
1539  *
1540  * We have a potential double-lock case here, so even when
1541  * doing backlog processing we use the BH locking scheme.
1542  * This is because we cannot sleep with the original spinlock
1543  * held.
1544  */
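/* For reference, the softirq-side caller tcp_v4_rcv() below follows roughly
 * this pattern around the call:
 *
 *	bh_lock_sock_nested(sk);
 *	if (!sock_owned_by_user(sk))
 *		ret = tcp_v4_do_rcv(sk, skb);	 (possibly via the prequeue)
 *	else
 *		sk_add_backlog(sk, skb);	 (replayed later through
 *					  backlog_rcv, which tcp_prot maps
 *					  back to this function)
 *	bh_unlock_sock(sk);
 */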
1545 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1546 {
1547         struct sock *rsk;
1548 #ifdef CONFIG_TCP_MD5SIG
1549         /*
1550          * We really want to reject the packet as early as possible
1551          * if:
1552          *  o We're expecting an MD5-signed packet and there is no MD5 TCP option
1553          *  o There is an MD5 option and we're not expecting one
1554          */
1555         if (tcp_v4_inbound_md5_hash(sk, skb))
1556                 goto discard;
1557 #endif
1558
1559         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1560                 sock_rps_save_rxhash(sk, skb->rxhash);
1561                 TCP_CHECK_TIMER(sk);
1562                 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1563                         rsk = sk;
1564                         goto reset;
1565                 }
1566                 TCP_CHECK_TIMER(sk);
1567                 return 0;
1568         }
1569
1570         if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1571                 goto csum_err;
1572
1573         if (sk->sk_state == TCP_LISTEN) {
1574                 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1575                 if (!nsk)
1576                         goto discard;
1577
1578                 if (nsk != sk) {
1579                         if (tcp_child_process(sk, nsk, skb)) {
1580                                 rsk = nsk;
1581                                 goto reset;
1582                         }
1583                         return 0;
1584                 }
1585         } else
1586                 sock_rps_save_rxhash(sk, skb->rxhash);
1587
1588
1589         TCP_CHECK_TIMER(sk);
1590         if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1591                 rsk = sk;
1592                 goto reset;
1593         }
1594         TCP_CHECK_TIMER(sk);
1595         return 0;
1596
1597 reset:
1598         tcp_v4_send_reset(rsk, skb);
1599 discard:
1600         kfree_skb(skb);
1601         /* Be careful here. If this function gets more complicated and
1602          * gcc suffers from register pressure on the x86, sk (in %ebx)
1603          * might be destroyed here. This current version compiles correctly,
1604          * but you have been warned.
1605          */
1606         return 0;
1607
1608 csum_err:
1609         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1610         goto discard;
1611 }
1612 EXPORT_SYMBOL(tcp_v4_do_rcv);
1613
1614 /*
1615  *      From tcp_input.c
1616  */
1617
1618 int tcp_v4_rcv(struct sk_buff *skb)
1619 {
1620         const struct iphdr *iph;
1621         struct tcphdr *th;
1622         struct sock *sk;
1623         int ret;
1624         struct net *net = dev_net(skb->dev);
1625
1626         if (skb->pkt_type != PACKET_HOST)
1627                 goto discard_it;
1628
1629         /* Count it even if it's bad */
1630         TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1631
1632         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1633                 goto discard_it;
1634
1635         th = tcp_hdr(skb);
1636
1637         if (th->doff < sizeof(struct tcphdr) / 4)
1638                 goto bad_packet;
1639         if (!pskb_may_pull(skb, th->doff * 4))
1640                 goto discard_it;
1641
1642         /* An explanation is required here, I think.
1643          * Packet length and doff are validated by header prediction,
1644          * provided the th->doff == 0 case has been eliminated above.
1645          * So, we defer those checks. */
1646         if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1647                 goto bad_packet;
1648
1649         th = tcp_hdr(skb);
1650         iph = ip_hdr(skb);
1651         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1652         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1653                                     skb->len - th->doff * 4);
1654         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1655         TCP_SKB_CB(skb)->when    = 0;
1656         TCP_SKB_CB(skb)->flags   = iph->tos;
1657         TCP_SKB_CB(skb)->sacked  = 0;
1658
1659         sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1660         if (!sk)
1661                 goto no_tcp_socket;
1662
1663 process:
1664         if (sk->sk_state == TCP_TIME_WAIT)
1665                 goto do_time_wait;
1666
1667         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1668                 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1669                 goto discard_and_relse;
1670         }
1671
1672         if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1673                 goto discard_and_relse;
1674         nf_reset(skb);
1675
1676         if (sk_filter(sk, skb))
1677                 goto discard_and_relse;
1678
1679         skb->dev = NULL;
1680
1681         bh_lock_sock_nested(sk);
1682         ret = 0;
1683         if (!sock_owned_by_user(sk)) {
1684 #ifdef CONFIG_NET_DMA
1685                 struct tcp_sock *tp = tcp_sk(sk);
1686                 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1687                         tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1688                 if (tp->ucopy.dma_chan)
1689                         ret = tcp_v4_do_rcv(sk, skb);
1690                 else
1691 #endif
1692                 {
1693                         if (!tcp_prequeue(sk, skb))
1694                                 ret = tcp_v4_do_rcv(sk, skb);
1695                 }
1696         } else if (unlikely(sk_add_backlog(sk, skb))) {
1697                 bh_unlock_sock(sk);
1698                 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1699                 goto discard_and_relse;
1700         }
1701         bh_unlock_sock(sk);
1702
1703         sock_put(sk);
1704
1705         return ret;
1706
1707 no_tcp_socket:
1708         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1709                 goto discard_it;
1710
1711         if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1712 bad_packet:
1713                 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1714         } else {
1715                 tcp_v4_send_reset(NULL, skb);
1716         }
1717
1718 discard_it:
1719         /* Discard frame. */
1720         kfree_skb(skb);
1721         return 0;
1722
1723 discard_and_relse:
1724         sock_put(sk);
1725         goto discard_it;
1726
1727 do_time_wait:
1728         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1729                 inet_twsk_put(inet_twsk(sk));
1730                 goto discard_it;
1731         }
1732
1733         if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1734                 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1735                 inet_twsk_put(inet_twsk(sk));
1736                 goto discard_it;
1737         }
1738         switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1739         case TCP_TW_SYN: {
1740                 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1741                                                         &tcp_hashinfo,
1742                                                         iph->daddr, th->dest,
1743                                                         inet_iif(skb));
1744                 if (sk2) {
1745                         inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1746                         inet_twsk_put(inet_twsk(sk));
1747                         sk = sk2;
1748                         goto process;
1749                 }
1750                 /* Fall through to ACK */
1751         }
1752         case TCP_TW_ACK:
1753                 tcp_v4_timewait_ack(sk, skb);
1754                 break;
1755         case TCP_TW_RST:
1756                 goto no_tcp_socket;
1757         case TCP_TW_SUCCESS:;
1758         }
1759         goto discard_it;
1760 }
1761
1762 /* VJ's idea. Save the last timestamp seen from this destination
1763  * and hold it for at least the normal timewait interval, so it can be used
1764  * for duplicate segment detection in subsequent connections before they
1765  * enter the synchronized state.
1766  */
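/* Concretely, the peer->tcp_ts / peer->tcp_ts_stamp pair cached below is what
 * tcp_v4_conn_request() consults when timestamps and tw_recycle are in use: a
 * new SYN whose timestamp lags the cached value by more than TCP_PAWS_WINDOW,
 * seen within TCP_PAWS_MSL seconds of the stamp, is rejected as a passive
 * PAWS failure.
 */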
1767
1768 int tcp_v4_remember_stamp(struct sock *sk)
1769 {
1770         struct inet_sock *inet = inet_sk(sk);
1771         struct tcp_sock *tp = tcp_sk(sk);
1772         struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
1773         struct inet_peer *peer = NULL;
1774         int release_it = 0;
1775
1776         if (!rt || rt->rt_dst != inet->inet_daddr) {
1777                 peer = inet_getpeer(inet->inet_daddr, 1);
1778                 release_it = 1;
1779         } else {
1780                 if (!rt->peer)
1781                         rt_bind_peer(rt, 1);
1782                 peer = rt->peer;
1783         }
1784
1785         if (peer) {
1786                 if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
1787                     ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
1788                      peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
1789                         peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
1790                         peer->tcp_ts = tp->rx_opt.ts_recent;
1791                 }
1792                 if (release_it)
1793                         inet_putpeer(peer);
1794                 return 1;
1795         }
1796
1797         return 0;
1798 }
1799 EXPORT_SYMBOL(tcp_v4_remember_stamp);
1800
1801 int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
1802 {
1803         struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);
1804
1805         if (peer) {
1806                 const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
1807
1808                 if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
1809                     ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
1810                      peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
1811                         peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
1812                         peer->tcp_ts       = tcptw->tw_ts_recent;
1813                 }
1814                 inet_putpeer(peer);
1815                 return 1;
1816         }
1817
1818         return 0;
1819 }
1820
1821 const struct inet_connection_sock_af_ops ipv4_specific = {
1822         .queue_xmit        = ip_queue_xmit,
1823         .send_check        = tcp_v4_send_check,
1824         .rebuild_header    = inet_sk_rebuild_header,
1825         .conn_request      = tcp_v4_conn_request,
1826         .syn_recv_sock     = tcp_v4_syn_recv_sock,
1827         .remember_stamp    = tcp_v4_remember_stamp,
1828         .net_header_len    = sizeof(struct iphdr),
1829         .setsockopt        = ip_setsockopt,
1830         .getsockopt        = ip_getsockopt,
1831         .addr2sockaddr     = inet_csk_addr2sockaddr,
1832         .sockaddr_len      = sizeof(struct sockaddr_in),
1833         .bind_conflict     = inet_csk_bind_conflict,
1834 #ifdef CONFIG_COMPAT
1835         .compat_setsockopt = compat_ip_setsockopt,
1836         .compat_getsockopt = compat_ip_getsockopt,
1837 #endif
1838 };
1839 EXPORT_SYMBOL(ipv4_specific);
1840
1841 #ifdef CONFIG_TCP_MD5SIG
1842 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1843         .md5_lookup             = tcp_v4_md5_lookup,
1844         .calc_md5_hash          = tcp_v4_md5_hash_skb,
1845         .md5_add                = tcp_v4_md5_add_func,
1846         .md5_parse              = tcp_v4_parse_md5_keys,
1847 };
1848 #endif
1849
1850 /* NOTE: A lot of things are set to zero explicitly by the call to
1851  *       sk_alloc(), so they need not be done here.
1852  */
1853 static int tcp_v4_init_sock(struct sock *sk)
1854 {
1855         struct inet_connection_sock *icsk = inet_csk(sk);
1856         struct tcp_sock *tp = tcp_sk(sk);
1857
1858         skb_queue_head_init(&tp->out_of_order_queue);
1859         tcp_init_xmit_timers(sk);
1860         tcp_prequeue_init(tp);
1861
1862         icsk->icsk_rto = TCP_TIMEOUT_INIT;
1863         tp->mdev = TCP_TIMEOUT_INIT;
1864
1865         /* So many TCP implementations out there (incorrectly) count the
1866          * initial SYN frame in their delayed-ACK and congestion control
1867          * algorithms that we must have the following bandaid to talk
1868          * efficiently to them.  -DaveM
1869          */
1870         tp->snd_cwnd = 2;
1871
1872         /* See draft-stevens-tcpca-spec-01 for discussion of the
1873          * initialization of these values.
1874          */
1875         tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1876         tp->snd_cwnd_clamp = ~0;
1877         tp->mss_cache = TCP_MSS_DEFAULT;
1878
1879         tp->reordering = sysctl_tcp_reordering;
1880         icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1881
1882         sk->sk_state = TCP_CLOSE;
1883
1884         sk->sk_write_space = sk_stream_write_space;
1885         sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1886
1887         icsk->icsk_af_ops = &ipv4_specific;
1888         icsk->icsk_sync_mss = tcp_sync_mss;
1889 #ifdef CONFIG_TCP_MD5SIG
1890         tp->af_specific = &tcp_sock_ipv4_specific;
1891 #endif
1892
1893         /* TCP Cookie Transactions */
1894         if (sysctl_tcp_cookie_size > 0) {
1895                 /* Default, cookies without s_data_payload. */
1896                 tp->cookie_values =
1897                         kzalloc(sizeof(*tp->cookie_values),
1898                                 sk->sk_allocation);
1899                 if (tp->cookie_values != NULL)
1900                         kref_init(&tp->cookie_values->kref);
1901         }
1902         /* Presumed zeroed, in order of appearance:
1903          *      cookie_in_always, cookie_out_never,
1904          *      s_data_constant, s_data_in, s_data_out
1905          */
1906         sk->sk_sndbuf = sysctl_tcp_wmem[1];
1907         sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1908
1909         local_bh_disable();
1910         percpu_counter_inc(&tcp_sockets_allocated);
1911         local_bh_enable();
1912
1913         return 0;
1914 }
1915
1916 void tcp_v4_destroy_sock(struct sock *sk)
1917 {
1918         struct tcp_sock *tp = tcp_sk(sk);
1919
1920         tcp_clear_xmit_timers(sk);
1921
1922         tcp_cleanup_congestion_control(sk);
1923
1924         /* Clean up the write buffer. */
1925         tcp_write_queue_purge(sk);
1926
1927         /* Cleans up our, hopefully empty, out_of_order_queue. */
1928         __skb_queue_purge(&tp->out_of_order_queue);
1929
1930 #ifdef CONFIG_TCP_MD5SIG
1931         /* Clean up the MD5 key list, if any */
1932         if (tp->md5sig_info) {
1933                 tcp_v4_clear_md5_list(sk);
1934                 kfree(tp->md5sig_info);
1935                 tp->md5sig_info = NULL;
1936         }
1937 #endif
1938
1939 #ifdef CONFIG_NET_DMA
1940         /* Cleans up our sk_async_wait_queue */
1941         __skb_queue_purge(&sk->sk_async_wait_queue);
1942 #endif
1943
1944         /* Clean up the prequeue; it really should be empty by now. */
1945         __skb_queue_purge(&tp->ucopy.prequeue);
1946
1947         /* Clean up a referenced TCP bind bucket. */
1948         if (inet_csk(sk)->icsk_bind_hash)
1949                 inet_put_port(sk);
1950
1951         /*
1952          * If a cached sendmsg page exists, toss it.
1953          */
1954         if (sk->sk_sndmsg_page) {
1955                 __free_page(sk->sk_sndmsg_page);
1956                 sk->sk_sndmsg_page = NULL;
1957         }
1958
1959         /* TCP Cookie Transactions */
1960         if (tp->cookie_values != NULL) {
1961                 kref_put(&tp->cookie_values->kref,
1962                          tcp_cookie_values_release);
1963                 tp->cookie_values = NULL;
1964         }
1965
1966         percpu_counter_dec(&tcp_sockets_allocated);
1967 }
1968 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1969
1970 #ifdef CONFIG_PROC_FS
1971 /* Proc filesystem TCP sock list dumping. */
1972
1973 static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
1974 {
1975         return hlist_nulls_empty(head) ? NULL :
1976                 list_entry(head->first, struct inet_timewait_sock, tw_node);
1977 }
1978
1979 static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1980 {
1981         return !is_a_nulls(tw->tw_node.next) ?
1982                 hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
1983 }
1984
1985 /*
1986  * Get the next listener socket following cur.  If cur is NULL, get the first
1987  * socket starting from the bucket given in st->bucket; when st->bucket is
1988  * zero, the very first socket in the hash table is returned.
1989  */
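/* The iterator below alternates between two states: TCP_SEQ_STATE_LISTENING
 * walks the listening hash chains under the per-bucket ilb->lock, while
 * TCP_SEQ_STATE_OPENREQ temporarily descends into a listener's SYN table
 * (under syn_wait_lock) to emit its pending request_socks before resuming
 * the walk at the next listening socket.
 */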
1990 static void *listening_get_next(struct seq_file *seq, void *cur)
1991 {
1992         struct inet_connection_sock *icsk;
1993         struct hlist_nulls_node *node;
1994         struct sock *sk = cur;
1995         struct inet_listen_hashbucket *ilb;
1996         struct tcp_iter_state *st = seq->private;
1997         struct net *net = seq_file_net(seq);
1998
1999         if (!sk) {
2000                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2001                 spin_lock_bh(&ilb->lock);
2002                 sk = sk_nulls_head(&ilb->head);
2003                 st->offset = 0;
2004                 goto get_sk;
2005         }
2006         ilb = &tcp_hashinfo.listening_hash[st->bucket];
2007         ++st->num;
2008         ++st->offset;
2009
2010         if (st->state == TCP_SEQ_STATE_OPENREQ) {
2011                 struct request_sock *req = cur;
2012
2013                 icsk = inet_csk(st->syn_wait_sk);
2014                 req = req->dl_next;
2015                 while (1) {
2016                         while (req) {
2017                                 if (req->rsk_ops->family == st->family) {
2018                                         cur = req;
2019                                         goto out;
2020                                 }
2021                                 req = req->dl_next;
2022                         }
2023                         st->offset = 0;
2024                         if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
2025                                 break;
2026 get_req:
2027                         req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2028                 }
2029                 sk        = sk_next(st->syn_wait_sk);
2030                 st->state = TCP_SEQ_STATE_LISTENING;
2031                 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2032         } else {
2033                 icsk = inet_csk(sk);
2034                 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2035                 if (reqsk_queue_len(&icsk->icsk_accept_queue))
2036                         goto start_req;
2037                 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2038                 sk = sk_next(sk);
2039         }
2040 get_sk:
2041         sk_nulls_for_each_from(sk, node) {
2042                 if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
2043                         cur = sk;
2044                         goto out;
2045                 }
2046                 icsk = inet_csk(sk);
2047                 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2048                 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2049 start_req:
2050                         st->uid         = sock_i_uid(sk);
2051                         st->syn_wait_sk = sk;
2052                         st->state       = TCP_SEQ_STATE_OPENREQ;
2053                         st->sbucket     = 0;
2054                         goto get_req;
2055                 }
2056                 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2057         }
2058         spin_unlock_bh(&ilb->lock);
2059         st->offset = 0;
2060         if (++st->bucket < INET_LHTABLE_SIZE) {
2061                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2062                 spin_lock_bh(&ilb->lock);
2063                 sk = sk_nulls_head(&ilb->head);
2064                 goto get_sk;
2065         }
2066         cur = NULL;
2067 out:
2068         return cur;
2069 }
2070
2071 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2072 {
2073         struct tcp_iter_state *st = seq->private;
2074         void *rc;
2075
2076         st->bucket = 0;
2077         st->offset = 0;
2078         rc = listening_get_next(seq, NULL);
2079
2080         while (rc && *pos) {
2081                 rc = listening_get_next(seq, rc);
2082                 --*pos;
2083         }
2084         return rc;
2085 }
2086
2087 static inline int empty_bucket(struct tcp_iter_state *st)
2088 {
2089         return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
2090                 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
2091 }
2092
2093 /*
2094  * Get the first established socket, starting from the bucket given in st->bucket.
2095  * If st->bucket is zero, the very first socket in the hash is returned.
2096  */
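/* Each established-hash bucket carries two chains: the regular ehash chain and
 * the twchain of TIME_WAIT sockets.  The walk below visits the former in
 * TCP_SEQ_STATE_ESTABLISHED and then switches to TCP_SEQ_STATE_TIME_WAIT for
 * the latter, all under the per-bucket lock from inet_ehash_lockp().
 */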
2097 static void *established_get_first(struct seq_file *seq)
2098 {
2099         struct tcp_iter_state *st = seq->private;
2100         struct net *net = seq_file_net(seq);
2101         void *rc = NULL;
2102
2103         st->offset = 0;
2104         for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2105                 struct sock *sk;
2106                 struct hlist_nulls_node *node;
2107                 struct inet_timewait_sock *tw;
2108                 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2109
2110                 /* Lockless fast path for the common case of empty buckets */
2111                 if (empty_bucket(st))
2112                         continue;
2113
2114                 spin_lock_bh(lock);
2115                 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2116                         if (sk->sk_family != st->family ||
2117                             !net_eq(sock_net(sk), net)) {
2118                                 continue;
2119                         }
2120                         rc = sk;
2121                         goto out;
2122                 }
2123                 st->state = TCP_SEQ_STATE_TIME_WAIT;
2124                 inet_twsk_for_each(tw, node,
2125                                    &tcp_hashinfo.ehash[st->bucket].twchain) {
2126                         if (tw->tw_family != st->family ||
2127                             !net_eq(twsk_net(tw), net)) {
2128                                 continue;
2129                         }
2130                         rc = tw;
2131                         goto out;
2132                 }
2133                 spin_unlock_bh(lock);
2134                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2135         }
2136 out:
2137         return rc;
2138 }
2139
2140 static void *established_get_next(struct seq_file *seq, void *cur)
2141 {
2142         struct sock *sk = cur;
2143         struct inet_timewait_sock *tw;
2144         struct hlist_nulls_node *node;
2145         struct tcp_iter_state *st = seq->private;
2146         struct net *net = seq_file_net(seq);
2147
2148         ++st->num;
2149         ++st->offset;
2150
2151         if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2152                 tw = cur;
2153                 tw = tw_next(tw);
2154 get_tw:
2155                 while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
2156                         tw = tw_next(tw);
2157                 }
2158                 if (tw) {
2159                         cur = tw;
2160                         goto out;
2161                 }
2162                 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2163                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2164
2165                 /* Look for the next non-empty bucket */
2166                 st->offset = 0;
2167                 while (++st->bucket <= tcp_hashinfo.ehash_mask &&
2168                                 empty_bucket(st))
2169                         ;
2170                 if (st->bucket > tcp_hashinfo.ehash_mask)
2171                         return NULL;
2172
2173                 spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2174                 sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
2175         } else
2176                 sk = sk_nulls_next(sk);
2177
2178         sk_nulls_for_each_from(sk, node) {
2179                 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2180                         goto found;
2181         }
2182
2183         st->state = TCP_SEQ_STATE_TIME_WAIT;
2184         tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
2185         goto get_tw;
2186 found:
2187         cur = sk;
2188 out:
2189         return cur;
2190 }
2191
2192 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2193 {
2194         struct tcp_iter_state *st = seq->private;
2195         void *rc;
2196
2197         st->bucket = 0;
2198         rc = established_get_first(seq);
2199
2200         while (rc && pos) {
2201                 rc = established_get_next(seq, rc);
2202                 --pos;
2203         }
2204         return rc;
2205 }
2206
2207 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2208 {
2209         void *rc;
2210         struct tcp_iter_state *st = seq->private;
2211
2212         st->state = TCP_SEQ_STATE_LISTENING;
2213         rc        = listening_get_idx(seq, &pos);
2214
2215         if (!rc) {
2216                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2217                 rc        = established_get_idx(seq, pos);
2218         }
2219
2220         return rc;
2221 }
2222
2223 static void *tcp_seek_last_pos(struct seq_file *seq)
2224 {
2225         struct tcp_iter_state *st = seq->private;
2226         int offset = st->offset;
2227         int orig_num = st->num;
2228         void *rc = NULL;
2229
2230         switch (st->state) {
2231         case TCP_SEQ_STATE_OPENREQ:
2232         case TCP_SEQ_STATE_LISTENING:
2233                 if (st->bucket >= INET_LHTABLE_SIZE)
2234                         break;
2235                 st->state = TCP_SEQ_STATE_LISTENING;
2236                 rc = listening_get_next(seq, NULL);
2237                 while (offset-- && rc)
2238                         rc = listening_get_next(seq, rc);
2239                 if (rc)
2240                         break;
2241                 st->bucket = 0;
2242                 /* Fallthrough */
2243         case TCP_SEQ_STATE_ESTABLISHED:
2244         case TCP_SEQ_STATE_TIME_WAIT:
2245                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2246                 if (st->bucket > tcp_hashinfo.ehash_mask)
2247                         break;
2248                 rc = established_get_first(seq);
2249                 while (offset-- && rc)
2250                         rc = established_get_next(seq, rc);
2251         }
2252
2253         st->num = orig_num;
2254
2255         return rc;
2256 }
2257
2258 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2259 {
2260         struct tcp_iter_state *st = seq->private;
2261         void *rc;
2262
2263         if (*pos && *pos == st->last_pos) {
2264                 rc = tcp_seek_last_pos(seq);
2265                 if (rc)
2266                         goto out;
2267         }
2268
2269         st->state = TCP_SEQ_STATE_LISTENING;
2270         st->num = 0;
2271         st->bucket = 0;
2272         st->offset = 0;
2273         rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2274
2275 out:
2276         st->last_pos = *pos;
2277         return rc;
2278 }
2279
2280 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2281 {
2282         struct tcp_iter_state *st = seq->private;
2283         void *rc = NULL;
2284
2285         if (v == SEQ_START_TOKEN) {
2286                 rc = tcp_get_idx(seq, 0);
2287                 goto out;
2288         }
2289
2290         switch (st->state) {
2291         case TCP_SEQ_STATE_OPENREQ:
2292         case TCP_SEQ_STATE_LISTENING:
2293                 rc = listening_get_next(seq, v);
2294                 if (!rc) {
2295                         st->state = TCP_SEQ_STATE_ESTABLISHED;
2296                         st->bucket = 0;
2297                         st->offset = 0;
2298                         rc        = established_get_first(seq);
2299                 }
2300                 break;
2301         case TCP_SEQ_STATE_ESTABLISHED:
2302         case TCP_SEQ_STATE_TIME_WAIT:
2303                 rc = established_get_next(seq, v);
2304                 break;
2305         }
2306 out:
2307         ++*pos;
2308         st->last_pos = *pos;
2309         return rc;
2310 }
2311
2312 static void tcp_seq_stop(struct seq_file *seq, void *v)
2313 {
2314         struct tcp_iter_state *st = seq->private;
2315
2316         switch (st->state) {
2317         case TCP_SEQ_STATE_OPENREQ:
2318                 if (v) {
2319                         struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2320                         read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2321                 }
2322         case TCP_SEQ_STATE_LISTENING:
2323                 if (v != SEQ_START_TOKEN)
2324                         spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2325                 break;
2326         case TCP_SEQ_STATE_TIME_WAIT:
2327         case TCP_SEQ_STATE_ESTABLISHED:
2328                 if (v)
2329                         spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2330                 break;
2331         }
2332 }
2333
2334 static int tcp_seq_open(struct inode *inode, struct file *file)
2335 {
2336         struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2337         struct tcp_iter_state *s;
2338         int err;
2339
2340         err = seq_open_net(inode, file, &afinfo->seq_ops,
2341                           sizeof(struct tcp_iter_state));
2342         if (err < 0)
2343                 return err;
2344
2345         s = ((struct seq_file *)file->private_data)->private;
2346         s->family               = afinfo->family;
2347         s->last_pos             = 0;
2348         return 0;
2349 }
2350
2351 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2352 {
2353         int rc = 0;
2354         struct proc_dir_entry *p;
2355
2356         afinfo->seq_fops.open           = tcp_seq_open;
2357         afinfo->seq_fops.read           = seq_read;
2358         afinfo->seq_fops.llseek         = seq_lseek;
2359         afinfo->seq_fops.release        = seq_release_net;
2360
2361         afinfo->seq_ops.start           = tcp_seq_start;
2362         afinfo->seq_ops.next            = tcp_seq_next;
2363         afinfo->seq_ops.stop            = tcp_seq_stop;
2364
2365         p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2366                              &afinfo->seq_fops, afinfo);
2367         if (!p)
2368                 rc = -ENOMEM;
2369         return rc;
2370 }
2371 EXPORT_SYMBOL(tcp_proc_register);
2372
2373 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2374 {
2375         proc_net_remove(net, afinfo->name);
2376 }
2377 EXPORT_SYMBOL(tcp_proc_unregister);
2378
2379 static void get_openreq4(struct sock *sk, struct request_sock *req,
2380                          struct seq_file *f, int i, int uid, int *len)
2381 {
2382         const struct inet_request_sock *ireq = inet_rsk(req);
2383         int ttd = req->expires - jiffies;
2384
2385         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2386                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
2387                 i,
2388                 ireq->loc_addr,
2389                 ntohs(inet_sk(sk)->inet_sport),
2390                 ireq->rmt_addr,
2391                 ntohs(ireq->rmt_port),
2392                 TCP_SYN_RECV,
2393                 0, 0, /* could print option size, but that is af dependent. */
2394                 1,    /* timers active (only the expire timer) */
2395                 jiffies_to_clock_t(ttd),
2396                 req->retrans,
2397                 uid,
2398                 0,  /* non standard timer */
2399                 0, /* open_requests have no inode */
2400                 atomic_read(&sk->sk_refcnt),
2401                 req,
2402                 len);
2403 }
2404
2405 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2406 {
2407         int timer_active;
2408         unsigned long timer_expires;
2409         struct tcp_sock *tp = tcp_sk(sk);
2410         const struct inet_connection_sock *icsk = inet_csk(sk);
2411         struct inet_sock *inet = inet_sk(sk);
2412         __be32 dest = inet->inet_daddr;
2413         __be32 src = inet->inet_rcv_saddr;
2414         __u16 destp = ntohs(inet->inet_dport);
2415         __u16 srcp = ntohs(inet->inet_sport);
2416         int rx_queue;
2417
2418         if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2419                 timer_active    = 1;
2420                 timer_expires   = icsk->icsk_timeout;
2421         } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2422                 timer_active    = 4;
2423                 timer_expires   = icsk->icsk_timeout;
2424         } else if (timer_pending(&sk->sk_timer)) {
2425                 timer_active    = 2;
2426                 timer_expires   = sk->sk_timer.expires;
2427         } else {
2428                 timer_active    = 0;
2429                 timer_expires = jiffies;
2430         }
2431
2432         if (sk->sk_state == TCP_LISTEN)
2433                 rx_queue = sk->sk_ack_backlog;
2434         else
2435                 /*
2436                  * Because we don't lock the socket, we might find a transient negative value.
2437                  */
2438                 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2439
2440         seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2441                         "%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
2442                 i, src, srcp, dest, destp, sk->sk_state,
2443                 tp->write_seq - tp->snd_una,
2444                 rx_queue,
2445                 timer_active,
2446                 jiffies_to_clock_t(timer_expires - jiffies),
2447                 icsk->icsk_retransmits,
2448                 sock_i_uid(sk),
2449                 icsk->icsk_probes_out,
2450                 sock_i_ino(sk),
2451                 atomic_read(&sk->sk_refcnt), sk,
2452                 jiffies_to_clock_t(icsk->icsk_rto),
2453                 jiffies_to_clock_t(icsk->icsk_ack.ato),
2454                 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2455                 tp->snd_cwnd,
2456                 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
2457                 len);
2458 }
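/* These /proc/net/tcp helpers emit one fixed-format line per entry: the
 * address columns are the raw __be32 values printed with %08X, and the timer
 * fields are converted with jiffies_to_clock_t() so user space sees clock
 * ticks rather than raw jiffies.
 */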
2459
2460 static void get_timewait4_sock(struct inet_timewait_sock *tw,
2461                                struct seq_file *f, int i, int *len)
2462 {
2463         __be32 dest, src;
2464         __u16 destp, srcp;
2465         int ttd = tw->tw_ttd - jiffies;
2466
2467         if (ttd < 0)
2468                 ttd = 0;
2469
2470         dest  = tw->tw_daddr;
2471         src   = tw->tw_rcv_saddr;
2472         destp = ntohs(tw->tw_dport);
2473         srcp  = ntohs(tw->tw_sport);
2474
2475         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2476                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
2477                 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2478                 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2479                 atomic_read(&tw->tw_refcnt), tw, len);
2480 }
2481
2482 #define TMPSZ 150
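/* TMPSZ is the fixed record width for the /proc/net/tcp output: tcp4_seq_show()
 * below pads every record (and the header line) out to TMPSZ - 1 characters
 * with its trailing "%*s" so each entry occupies a line of constant size.
 */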
2483
2484 static int tcp4_seq_show(struct seq_file *seq, void *v)
2485 {
2486         struct tcp_iter_state *st;
2487         int len;
2488
2489         if (v == SEQ_START_TOKEN) {
2490                 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2491                            "  sl  local_address rem_address   st tx_queue "
2492                            "rx_queue tr tm->when retrnsmt   uid  timeout "
2493                            "inode");
2494                 goto out;
2495         }
2496         st = seq->private;
2497
2498         switch (st->state) {
2499         case TCP_SEQ_STATE_LISTENING:
2500         case TCP_SEQ_STATE_ESTABLISHED:
2501                 get_tcp4_sock(v, seq, st->num, &len);
2502                 break;
2503         case TCP_SEQ_STATE_OPENREQ:
2504                 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
2505                 break;
2506         case TCP_SEQ_STATE_TIME_WAIT:
2507                 get_timewait4_sock(v, seq, st->num, &len);
2508                 break;
2509         }
2510         seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
2511 out:
2512         return 0;
2513 }
2514
2515 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2516         .name           = "tcp",
2517         .family         = AF_INET,
2518         .seq_fops       = {
2519                 .owner          = THIS_MODULE,
2520         },
2521         .seq_ops        = {
2522                 .show           = tcp4_seq_show,
2523         },
2524 };
2525
2526 static int __net_init tcp4_proc_init_net(struct net *net)
2527 {
2528         return tcp_proc_register(net, &tcp4_seq_afinfo);
2529 }
2530
2531 static void __net_exit tcp4_proc_exit_net(struct net *net)
2532 {
2533         tcp_proc_unregister(net, &tcp4_seq_afinfo);
2534 }
2535
2536 static struct pernet_operations tcp4_net_ops = {
2537         .init = tcp4_proc_init_net,
2538         .exit = tcp4_proc_exit_net,
2539 };
2540
2541 int __init tcp4_proc_init(void)
2542 {
2543         return register_pernet_subsys(&tcp4_net_ops);
2544 }
2545
2546 void tcp4_proc_exit(void)
2547 {
2548         unregister_pernet_subsys(&tcp4_net_ops);
2549 }
2550 #endif /* CONFIG_PROC_FS */
2551
2552 struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2553 {
2554         struct iphdr *iph = skb_gro_network_header(skb);
2555
2556         switch (skb->ip_summed) {
2557         case CHECKSUM_COMPLETE:
2558                 if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
2559                                   skb->csum)) {
2560                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2561                         break;
2562                 }
2563
2564                 /* fall through */
2565         case CHECKSUM_NONE:
2566                 NAPI_GRO_CB(skb)->flush = 1;
2567                 return NULL;
2568         }
2569
2570         return tcp_gro_receive(head, skb);
2571 }
2572 EXPORT_SYMBOL(tcp4_gro_receive);
2573
2574 int tcp4_gro_complete(struct sk_buff *skb)
2575 {
2576         struct iphdr *iph = ip_hdr(skb);
2577         struct tcphdr *th = tcp_hdr(skb);
2578
2579         th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
2580                                   iph->saddr, iph->daddr, 0);
2581         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2582
2583         return tcp_gro_complete(skb);
2584 }
2585 EXPORT_SYMBOL(tcp4_gro_complete);
2586
2587 struct proto tcp_prot = {
2588         .name                   = "TCP",
2589         .owner                  = THIS_MODULE,
2590         .close                  = tcp_close,
2591         .connect                = tcp_v4_connect,
2592         .disconnect             = tcp_disconnect,
2593         .accept                 = inet_csk_accept,
2594         .ioctl                  = tcp_ioctl,
2595         .init                   = tcp_v4_init_sock,
2596         .destroy                = tcp_v4_destroy_sock,
2597         .shutdown               = tcp_shutdown,
2598         .setsockopt             = tcp_setsockopt,
2599         .getsockopt             = tcp_getsockopt,
2600         .recvmsg                = tcp_recvmsg,
2601         .sendmsg                = tcp_sendmsg,
2602         .sendpage               = tcp_sendpage,
2603         .backlog_rcv            = tcp_v4_do_rcv,
2604         .hash                   = inet_hash,
2605         .unhash                 = inet_unhash,
2606         .get_port               = inet_csk_get_port,
2607         .enter_memory_pressure  = tcp_enter_memory_pressure,
2608         .sockets_allocated      = &tcp_sockets_allocated,
2609         .orphan_count           = &tcp_orphan_count,
2610         .memory_allocated       = &tcp_memory_allocated,
2611         .memory_pressure        = &tcp_memory_pressure,
2612         .sysctl_mem             = sysctl_tcp_mem,
2613         .sysctl_wmem            = sysctl_tcp_wmem,
2614         .sysctl_rmem            = sysctl_tcp_rmem,
2615         .max_header             = MAX_TCP_HEADER,
2616         .obj_size               = sizeof(struct tcp_sock),
2617         .slab_flags             = SLAB_DESTROY_BY_RCU,
2618         .twsk_prot              = &tcp_timewait_sock_ops,
2619         .rsk_prot               = &tcp_request_sock_ops,
2620         .h.hashinfo             = &tcp_hashinfo,
2621         .no_autobind            = true,
2622 #ifdef CONFIG_COMPAT
2623         .compat_setsockopt      = compat_tcp_setsockopt,
2624         .compat_getsockopt      = compat_tcp_getsockopt,
2625 #endif
2626 };
2627 EXPORT_SYMBOL(tcp_prot);
2628
2629
2630 static int __net_init tcp_sk_init(struct net *net)
2631 {
2632         return inet_ctl_sock_create(&net->ipv4.tcp_sock,
2633                                     PF_INET, SOCK_RAW, IPPROTO_TCP, net);
2634 }
2635
2636 static void __net_exit tcp_sk_exit(struct net *net)
2637 {
2638         inet_ctl_sock_destroy(net->ipv4.tcp_sock);
2639 }
2640
2641 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2642 {
2643         inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2644 }
2645
2646 static struct pernet_operations __net_initdata tcp_sk_ops = {
2647        .init       = tcp_sk_init,
2648        .exit       = tcp_sk_exit,
2649        .exit_batch = tcp_sk_exit_batch,
2650 };
2651
2652 void __init tcp_v4_init(void)
2653 {
2654         inet_hashinfo_init(&tcp_hashinfo);
2655         if (register_pernet_subsys(&tcp_sk_ops))
2656                 panic("Failed to create the TCP control socket.\n");
2657 }