// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}
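/* Worked example (illustrative numbers, not from the original source):
 * with s_win = 1000 and e_win = 2000, a segment with seq = 900 and
 * end_seq = 1100 is accepted because it overlaps the window
 * (after(1100, 1000) && before(900, 2000)), while a zero-length segment
 * with seq == end_seq == 2000 is accepted only by the final equality
 * test, which admits a bare segment sitting exactly on the right edge.
 */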
static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
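/* An illustrative aside (not part of the original file): the
 * out-of-window rate limit checked above is governed by the
 * tcp_invalid_ratelimit sysctl, so with its default of 500 ms at most
 * one such ACK is sent per half second per socket, e.g.:
 *
 *	# sysctl net.ipv4.tcp_invalid_ratelimit
 *	net.ipv4.tcp_invalid_ratelimit = 500
 */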
/*
 * * Main purpose of TIME-WAIT state is to close the connection gracefully,
 *   when one of the ends sits in LAST-ACK or CLOSING retransmitting FIN
 *   (and, probably, a tail of data) and one or more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout enough to allow losing one (or more)
 *   segments sent by the peer and our ACKs. This time may be calculated from RTO.
 * * When a TIME-WAIT socket receives RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
 * When you compare it to the RFCs, please read section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
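/* For scale (a hedged aside, not part of the original comment): Linux
 * uses a fixed TIME-WAIT interval, TCP_TIMEWAIT_LEN = 60 * HZ, i.e.
 * about 60 seconds. With a typical RTO of a few hundred milliseconds,
 * even several back-to-back FIN retransmissions plus lost ACKs fit
 * comfortably inside that window:
 *
 *	RTO = 200 ms, 5 retransmissions with exponential backoff:
 *	200 + 400 + 800 + 1600 + 3200 ms ~= 6.2 s  <<  60 s
 */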
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}
	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}
	/*
	 *	Now real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */
	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		} else {
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		}

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}
	/* Out of window segment.

	   All the segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check (that the SYN carries
	   a newer sequence number) only works at rates < 40 Mbit/sec.
	   However, if PAWS works, it is reliable, and moreover,
	   we may even relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN turns
	   out to be an old duplicate (i.e. we receive RST in reply to
	   SYN-ACK), we must return the socket to time-wait state. It is
	   not good, but not fatal yet.
	 */
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;

		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}
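	/* A worked example of the ISN choice above (numbers illustrative,
	 * the constants are from the code): if the old incarnation ended
	 * with tw_snd_nxt = 1000000, the new connection starts at
	 * 1000000 + 65535 + 2 = 1065537, which lies beyond the largest
	 * unscaled window (65535) the peer could still advertise for the
	 * old incarnation, satisfying requirement (1) of the RFC 1122
	 * quote earlier in this function.
	 */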
	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}

	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
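/* A hedged sketch of how a caller dispatches on the status returned
 * above; modeled on the do_time_wait handling in the IPv4 receive path,
 * abbreviated here for illustration:
 *
 *	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
 *	case TCP_TW_SYN:
 *		// hand the SYN to a matching listener as a new attempt
 *	case TCP_TW_ACK:
 *		// answer the peer with an ACK
 *	case TCP_TW_RST:
 *		// send a reset to the peer
 *	case TCP_TW_SUCCESS:
 *		// nothing more to do, the skb is dropped
 *	}
 */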
/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_timewait_sock *tw;

	tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);
	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
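		/* The shift expression above is integer arithmetic for
		 * 3.5 * RTO: (rto << 2) is 4 * RTO and (rto >> 1) is
		 * RTO / 2, so e.g. an RTO of 200 ms yields
		 * 800 - 100 = 700 ms. (Worked example added for clarity;
		 * the numbers are illustrative.)
		 */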
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent	= inet->transparent;
		tw->tw_mark		= sk->sk_mark;
		tw->tw_priority		= sk->sk_priority;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;
		tcptw->tw_tx_delay	= tp->tcp_tx_delay;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_txhash = sk->sk_txhash;
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif
#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			tcptw->tw_md5_key = NULL;
			if (static_branch_unlikely(&tcp_md5_needed)) {
				struct tcp_md5sig_key *key;

				key = tp->af_specific->md5_lookup(sk, sk);
				if (key) {
					tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
					BUG_ON(tcptw->tw_md5_key && !tcp_alloc_md5sig_pool());
				}
			}
		} while (0);
#endif
		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* tw_timer is pinned, so we need to make sure BH are disabled
		 * in the following section, otherwise the timer handler could
		 * run before we complete the initialization.
		 */
		local_bh_disable();
		inet_twsk_schedule(tw, timeo);
		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
		local_bh_enable();
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);
void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed)) {
		struct tcp_timewait_sock *twsk = tcp_twsk(sk);

		if (twsk->tw_md5_key)
			kfree_rcu(twsk->tw_md5_key, rcu);
	}
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
void tcp_twsk_purge(struct list_head *net_exit_list, int family)
{
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		/* The last refcount is decremented in tcp_sk_exit_batch() */
		if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1)
			continue;

		inet_twsk_purge(&tcp_hashinfo, family);
	}
}
EXPORT_SYMBOL_GPL(tcp_twsk_purge);
/* Warning : This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(sk_listener, full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);
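/* A small worked example for the window setup above (illustrative
 * numbers): with an advertised MSS of 1460 and timestamps negotiated,
 * tcp_select_initial_window() is offered an effective MSS of
 * 1460 - TCPOLEN_TSTAMP_ALIGNED = 1460 - 12 = 1448 bytes, since every
 * segment of the connection will carry the 12-byte aligned timestamp
 * option.
 */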
static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
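/* For context (a hedged usage note, not from the original file): the
 * RTAX_CC_ALGO metric consulted above can be attached to a route from
 * userspace, e.g. with iproute2:
 *
 *	ip route add 10.0.0.0/24 via 192.168.1.1 congctl bbr
 *
 * in which case sockets accepted over that route prefer "bbr" over the
 * system default congestion control.
 */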
static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}
/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here: tp of the listening
 * socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_request_sock *treq = tcp_rsk(req);
	struct inet_connection_sock *newicsk;
	struct tcp_sock *oldtp, *newtp;
	u32 seq;

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newtp = tcp_sk(newsk);
	oldtp = tcp_sk(sk);

	smc_check_reset_syn_req(oldtp, req, newtp);

	/* Now setup tcp_sock */
	newtp->pred_flags = 0;
	seq = treq->rcv_isn + 1;
	newtp->rcv_wup = seq;
	WRITE_ONCE(newtp->copied_seq, seq);
	WRITE_ONCE(newtp->rcv_nxt, seq);
	newtp->segs_in = 1;

	seq = treq->snt_isn + 1;
	newtp->snd_sml = newtp->snd_una = seq;
	WRITE_ONCE(newtp->snd_nxt, seq);
	newtp->snd_up = seq;

	INIT_LIST_HEAD(&newtp->tsq_node);
	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

	tcp_init_wl(newtp, treq->rcv_isn);

	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

	newtp->lsndtime = tcp_jiffies32;
	newsk->sk_txhash = treq->txhash;
	newtp->total_retrans = req->num_retrans;

	tcp_init_xmit_timers(newsk);
	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

	if (sock_flag(newsk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(newsk,
					       keepalive_time_when(newtp));
	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
	newtp->rx_opt.sack_ok = ireq->sack_ok;
	newtp->window_clamp = req->rsk_window_clamp;
	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
	newtp->rcv_wnd = req->rsk_rcv_wnd;
	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
	if (newtp->rx_opt.wscale_ok) {
		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
	} else {
		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
		newtp->window_clamp = min(newtp->window_clamp, 65535U);
	}
	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
	newtp->max_window = newtp->snd_wnd;

	if (newtp->rx_opt.tstamp_ok) {
		newtp->rx_opt.ts_recent = req->ts_recent;
		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
	} else {
		newtp->rx_opt.ts_recent_stamp = 0;
		newtp->tcp_header_len = sizeof(struct tcphdr);
	}
	if (req->num_timeout) {
		newtp->undo_marker = treq->snt_isn;
		newtp->retrans_stamp = div_u64(treq->snt_synack,
					       USEC_PER_SEC / TCP_TS_HZ);
	}
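	/* The divisor above is plain unit conversion: snt_synack is kept
	 * in microseconds, and USEC_PER_SEC / TCP_TS_HZ = 1000000 / 1000
	 * = 1000 converts it to the millisecond resolution of TCP
	 * timestamps. (Worked arithmetic added for clarity; TCP_TS_HZ is
	 * 1000 in this kernel.)
	 */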
	newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
	newtp->md5sig_info = NULL;	/*XXX*/
	if (treq->af_specific->req_md5_lookup(sk, req_to_sk(req)))
		newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
	newtp->rx_opt.mss_clamp = req->mss;
	tcp_ecn_openreq_child(newtp, req);
	newtp->fastopen_req = NULL;
	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

	tcp_bpf_clone(sk, newsk);

	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
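/* Return-value summary (an editorial aid, distilled from the code
 * below): the function hands back the newly created child socket on a
 * valid final ACK, returns sk itself when the segment should continue
 * through the caller's normal path (e.g. Fast Open, or an ACK that must
 * elicit a reset from the listener), and returns NULL when the segment
 * has been consumed or dropped.
 */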
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen, bool *req_stolen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not
			 * required, it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}
	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe the SYN-RECV state. All of the description
		 *  is wrong, we cannot believe it and should rely only
		 *  on common sense and implementation experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK, similar
		 * to the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&
		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += reqsk_timeout(req, TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}
	/* Further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken; however, it fails only in the case
	   when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, the ACK test passes. So
	   does the sequence test, the SYN is truncated, and thus we
	   consider it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK. Otherwise, we create an established connection. Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless, and rare. The possibility is about
	   the same as us discovering intelligent life on another planet
	   tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating protocol. All the checks must be made
	   before attempting to create a socket.
	 */
	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;
	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause a reset in unsynchronized states.
	 */
	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}
	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}
	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;
	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}
	/* OK, ACK is valid, create big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it will be dropped after
	 * socket is created, wait for troubles.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	if (own_req && rsk_drop_req(req)) {
		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
		return child;
	}

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	*req_stolen = !own_req;
	return inet_csk_complete_hashdance(sk, child, req, own_req);
listen_overflow:
	if (sk != req->rsk_listener)
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}
embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk, skb);
	}
	if (!fastopen) {
		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);

		if (unlinked)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
		*req_stolen = !unlinked;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
	__releases(&((child)->sk_lock.slock))
{
	int ret = 0;
	int state = child->sk_state;

	/* record sk_napi_id and sk_rx_queue_mapping of child. */
	sk_mark_napi_id_set(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);
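/* A hedged usage sketch (abridged from the shape of the IPv4 receive
 * path, not a verbatim copy): the child socket arrives here already
 * locked (inet_csk_clone_lock() returns it locked), and
 * tcp_child_process() drops that lock, hence the __releases()
 * annotation above:
 *
 *	if (tcp_child_process(listener, child, skb)) {
 *		// non-zero return: tcp_rcv_state_process() asked for a
 *		// reset, answer the peer with a RST
 *	}
 *	// no unlock here: tcp_child_process() released the lock
 */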