/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
int sysctl_tcp_syncookies __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_syncookies);

int sysctl_tcp_abort_on_overflow __read_mostly;
struct inet_timewait_death_row tcp_death_row = {
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.period		= TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
	.death_lock	= __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
	.hashinfo	= &tcp_hashinfo,
	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
					    (unsigned long)&tcp_death_row),
	.twkill_work	= __WORK_INITIALIZER(tcp_death_row.twkill_work,
					     inet_twdr_twkill_work),
	/* Short-time timewait calendar */
	.twcal_timer	= TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
					    (unsigned long)&tcp_death_row),
};
EXPORT_SYMBOL_GPL(tcp_death_row);
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}
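
/*
 * Illustrative sketch (not part of this file's build): the after()/before()
 * helpers compare 32-bit sequence numbers by signed subtraction, which is
 * what lets tcp_in_window() keep working across sequence-number wrap. The
 * numbers below are made up purely for demonstration.
 */
#if 0
static void tcp_in_window_demo(void)
{
	u32 rcv_nxt = 0xfffffff0u;	/* window starts just below the wrap point */
	u32 rcv_wnd = 0x1000;		/* 4 KB window, ends past the wrap */

	/* Segment [0xfffffff8, 0x00000008) straddles the wrap and is accepted. */
	bool ok  = tcp_in_window(0xfffffff8u, 0x00000008u,
				 rcv_nxt, rcv_nxt + rcv_wnd);
	/* A segment far outside the window is rejected despite the wrap. */
	bool bad = tcp_in_window(0x10000000u, 0x10000010u,
				 rcv_nxt, rcv_nxt + rcv_wnd);

	WARN_ON(!ok || bad);
}
#endif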
/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully when one of the ends sits in LAST-ACK or CLOSING,
 *   retransmitting its FIN (and, probably, a tail of data) while one or
 *   more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering off their path.
 *   That is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow losing one (or more)
 *   segments sent by the peer and our ACKs. This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   has finally closed and we are allowed to kill the TIME-WAIT state too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with these semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. Strictly speaking, we would have to take a
 * spinlock on it. I do not want to! The probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states, etc.  --ANK
 */
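
/*
 * Back-of-the-envelope sketch of the point above (the 60 s figure is
 * TCP_TIMEWAIT_LEN in this tree; the 1 s RTO is an assumed example): a peer
 * stuck in LAST-ACK retransmits its FIN with exponential backoff, so several
 * lost FIN/ACK exchanges still complete before the TIME-WAIT bucket expires.
 */
#if 0
static unsigned int fin_retransmissions_within(unsigned int rto_secs,
					       unsigned int window_secs)
{
	unsigned int t = 0, n = 0;

	/* Exponential backoff: retransmissions at rto, 3*rto, 7*rto, ... */
	while (t + rto_secs <= window_secs) {
		t += rto_secs;
		rto_secs *= 2;
		n++;
	}
	return n;	/* e.g. 5 FIN retransmissions fit in 60 s for a 1 s RTO */
}
#endif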
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}
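
	/*
	 * Simplified sketch of the PAWS idea applied just above (assumption:
	 * this shows only the core timestamp comparison; the real
	 * tcp_paws_reject() also considers RSTs and the age of ts_recent):
	 */
#if 0
	{
		u32 ts_recent = tcptw->tw_ts_recent;	/* newest in-window timestamp seen */
		u32 rcv_tsval = tmp_opt.rcv_tsval;	/* timestamp on the new segment */

		/* Signed 32-bit compare: reject if the new value is older. */
		bool paws_would_reject = (s32)(ts_recent - rcv_tsval) > 0;
		(void)paws_would_reject;
	}
#endif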
	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return TCP_TW_ACK;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			goto kill_with_rst;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		if (tcp_death_row.sysctl_tw_recycle &&
		    tcptw->tw_ts_recent_stamp &&
		    tcp_tw_remember_stamp(tw))
			inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
					   TCP_TIMEWAIT_LEN);
		else
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}
	/*
	 *	Now real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */
	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
				inet_twsk_deschedule(tw, &tcp_death_row);
				inet_twsk_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
				   TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}
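
	/*
	 * The sysctl above selects between the two behaviours discussed in
	 * RFC 1337 ("TIME-WAIT assassination hazards"). Summary (the knob is
	 * the real net.ipv4.tcp_rfc1337 sysctl; the rest is paraphrase):
	 *
	 *   tcp_rfc1337 = 0 (default): an in-window RST kills the TIME-WAIT
	 *                              bucket immediately.
	 *   tcp_rfc1337 = 1:           the RST is ignored and the bucket is
	 *                              rescheduled for the full TCP_TIMEWAIT_LEN,
	 *                              e.g. after "sysctl -w net.ipv4.tcp_rfc1337=1".
	 */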
	/*	Out of window segment.

		All the segments are ACKed immediately.

		The only exception is a new SYN. We accept it only if it is
		not an old duplicate and we are not in danger of being killed
		by delayed old duplicates. The RFC check (that it carries a
		newer sequence number) works at rates < 40 Mbit/sec.
		However, if PAWS works it is reliable, and moreover we may
		even relax the silly sequence-space cutoff.

		RED-PEN: we violate the main RFC requirement here: if this SYN
		turns out to be an old duplicate (i.e. we receive an RST in
		reply to our SYN-ACK), we must return the socket to the
		time-wait state. That is not good, but not fatal yet.
	 */
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->when = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
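
	/*
	 * Sketch of the ISN choice above (example numbers are made up): the
	 * new incarnation's ISN is placed beyond anything the old connection
	 * could have sent, by adding one maximum window (65535) plus 2 to
	 * tw_snd_nxt, so old segments can never be mistaken for new data:
	 *
	 *	tw_snd_nxt = 1000000  ->  isn = 1000000 + 65535 + 2 = 1065537
	 */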
	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);

		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
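
/*
 * For context, a sketch of how a receive path might act on the verdicts
 * returned above (loosely modelled on the IPv4 do_time_wait handling; the
 * actions in the comments are descriptive, not exact function calls):
 */
#if 0
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
		/* acceptable new SYN: look up the listener and process the
		 * segment as a fresh connection attempt */
		break;
	case TCP_TW_ACK:
		/* re-ACK the peer; the timewait bucket is kept */
		break;
	case TCP_TW_RST:
		/* answer with a reset */
		break;
	case TCP_TW_SUCCESS:
		/* nothing more to do, drop the segment */
		break;
	}
#endif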
/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	struct inet_timewait_sock *tw = NULL;
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	bool recycle_ok = false;

	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
		recycle_ok = tcp_remember_stamp(sk);

	if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
		tw = inet_twsk_alloc(sk, state);

	if (tw != NULL) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent	= inet->transparent;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);
			struct inet6_timewait_sock *tw6;

			tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
			tw6 = inet6_twsk((struct sock *)tw);
			tw6->tw_v6_daddr = np->daddr;
			tw6->tw_v6_rcv_saddr = np->rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_ipv6only = np->ipv6only;
		}
#endif
#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;

			tcptw->tw_md5_key = NULL;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key != NULL) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				if (tcptw->tw_md5_key && tcp_alloc_md5sig_pool(sk) == NULL)
					BUG();
			}
		} while (0);
#endif
		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		inet_twsk_schedule(tw, &tcp_death_row, timeo,
				   TCP_TIMEWAIT_LEN);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
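
/*
 * Note on the timeout arithmetic above: the local "rto" is 3.5 times the
 * connection's retransmission timeout, since (x << 2) - (x >> 1) = 4x - x/2.
 * Worked example with made-up numbers: for icsk_rto = 200 ms the recycled
 * bucket lives at least 700 ms, while the non-recycled TCP_TIME_WAIT case
 * always uses the full TCP_TIMEWAIT_LEN (60 s).
 */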
void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_md5_key) {
		tcp_free_md5sig_pool();
		kfree_rcu(twsk->tw_md5_key, rcu);
	}
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
					 struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}
/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid a lot of the memory writes here. The tp of the
 * listening socket already contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk != NULL) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);
		struct tcp_sock *oldtp = tcp_sk(sk);
		struct tcp_cookie_values *oldcvp = oldtp->cookie_values;

		inet_sk_rx_dst_set(newsk, skb);
		/* TCP Cookie Transactions require space for the cookie pair,
		 * as it differs for each connection.  There is no need to
		 * copy any s_data_payload stored at the original socket.
		 * Failure will prevent resuming the connection.
		 *
		 * Presumed copied, in order of appearance:
		 *	cookie_in_always, cookie_out_never
		 */
		if (oldcvp != NULL) {
			struct tcp_cookie_values *newcvp =
				kzalloc(sizeof(*newtp->cookie_values),
					GFP_ATOMIC);

			if (newcvp != NULL) {
				kref_init(&newcvp->kref);
				newcvp->cookie_desired =
						oldcvp->cookie_desired;
				newtp->cookie_values = newcvp;
			} else {
				/* Not Yet Implemented */
				newtp->cookie_values = NULL;
			}
		}
		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;

		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up =
			treq->snt_isn + 1 + tcp_s_data_size(oldtp);

		tcp_prequeue_init(newtp);
		INIT_LIST_HEAD(&newtp->tsq_node);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->mdev = TCP_TIMEOUT_INIT;
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
		tcp_enable_early_retrans(newtp);
		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;
		newtp->bytes_acked = 0;

		newtp->frto_counter = 0;
		newtp->frto_highmark = 0;
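
		/*
		 * TCP_INIT_CWND is 10 segments in this tree (the IW10 value),
		 * so with an assumed MSS of 1460 bytes the new socket may put
		 * roughly 10 * 1460 = 14600 bytes in flight before the first
		 * ACK comes back.
		 */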
		if (newicsk->icsk_ca_ops != &tcp_init_congestion_ops &&
		    !try_module_get(newicsk->icsk_ca_ops->owner))
			newicsk->icsk_ca_ops = &tcp_init_congestion_ops;

		tcp_set_ca_state(newsk, TCP_CA_Open);
		tcp_init_xmit_timers(newsk);
		skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->write_seq = newtp->pushed_seq =
			treq->snt_isn + 1 + tcp_s_data_size(oldtp);

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));
		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;
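
		/*
		 * Worked example of the scaling above (values are made up):
		 * if the ACK completing the handshake carried window = 8192
		 * and the peer agreed to snd_wscale = 7, the effective send
		 * window starts at 8192 << 7 = 1048576 bytes. The shift is
		 * only valid because both sides negotiated window scaling on
		 * their SYNs.
		 */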
		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		TCP_ECN_openreq_child(newtp, req);

		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
/*
 * Process an incoming packet for SYN_RECV sockets represented
 * as a request_sock.
 */
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   struct request_sock **prev)
{
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			/* We do not store the true stamp, but it is not required:
			 * it can be estimated (approximately) from other data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}
	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe the SYN-RECV state. All the description
		 *  is wrong, we cannot trust it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 */
		req->rsk_ops->rtx_syn_ack(sk, req, NULL);
		return NULL;
	}
	/* Further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however: the only case where it does not work
	   is when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

	   A: gets SYN, seq=7
	   B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

	   A: sends SYN|ACK, seq=7, ack_seq=8
	   B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, the ACK test passes. So
	   does the sequence test, the SYN is truncated, and thus we consider
	   it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept is set, we silently drop
	   this bare ACK. Otherwise, we create an established connection. Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless and rare. The possibility is about
	   the same as us discovering intelligent life on another planet tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK
	   on the SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not either.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before an attempt to create a socket.
	 */
	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 *                  and the incoming segment acknowledges something not yet
	 *                  sent (the segment carries an unacceptable ACK) ...
	 *                  a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by the listening socket.
	 */
	if ((flg & TCP_FLAG_ACK) &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk))))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension: too early or too late values
	 * should cause a reset in unsynchronized states.
	 */
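
	/*
	 * Worked example of the ACK check above (numbers made up): if our
	 * SYN-ACK used snt_isn = 5000, the only acceptable ACK in SYN-RECV
	 * acknowledges exactly 5001 (plus any s_data payload). A segment with
	 * ack_seq = 7000 carries an unacceptable ACK and is handed back to
	 * the listener, which answers with a reset as RFC793 requires for
	 * non-synchronized states.
	 */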
	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}
	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;
	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}
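
	/*
	 * For reference, the userspace knob that drives this branch
	 * (illustrative snippet, not kernel code): a listener enables
	 * deferred accept with
	 *
	 *	int secs = 5;	// keep the request queued up to ~5 s waiting for data
	 *	setsockopt(fd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &secs, sizeof(secs));
	 *
	 * While it is active, the bare third-handshake ACK above is dropped
	 * and the connection is only reported to accept() once data arrives.
	 */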
	if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr)
		tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
	else if (req->retrans) /* don't take RTT sample if retrans && ~TS */
		tcp_rsk(req)->snt_synack = 0;
	/* OK, ACK is valid, create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it gets dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL)
		goto listen_overflow;

	inet_csk_reqsk_queue_unlink(sk, req, prev);
	inet_csk_reqsk_queue_removed(sk, req);

	inet_csk_reqsk_queue_add(sk, req, child);
	return child;
listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	if (!(flg & TCP_FLAG_RST))
		req->rsk_ops->send_reset(sk, skb);

	inet_csk_reqsk_queue_drop(sk, req, prev);
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 */
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
					    skb->len);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the listening
		 * socket does not protect us any more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);