// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/tcp.h>

void tcp_fastopen_init_key_once(struct net *net)
{
        u8 key[TCP_FASTOPEN_KEY_LENGTH];
        struct tcp_fastopen_context *ctxt;

        rcu_read_lock();
        ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
        if (ctxt) {
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();

        /* tcp_fastopen_reset_cipher publishes the new context
         * atomically, so we allow this race to happen here.
         *
         * All call sites of tcp_fastopen_cookie_gen also check
         * for a valid cookie, so this is an acceptable risk.
         */
        get_random_bytes(key, sizeof(key));
        tcp_fastopen_reset_cipher(net, NULL, key, NULL);
}
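
/* Illustrative note (not part of the original file): the per-netns key that
 * this helper generates lazily can also be set explicitly through the
 * net.ipv4.tcp_fastopen_key sysctl, e.g. (key value made up):
 *
 *      sysctl -w net.ipv4.tcp_fastopen_key=00112233-44556677-8899aabb-ccddeeff
 *
 * An optional second, comma-separated key installs a backup key; see
 * tcp_fastopen_reset_cipher() below.
 */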

static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
        struct tcp_fastopen_context *ctx =
                container_of(head, struct tcp_fastopen_context, rcu);

        kfree_sensitive(ctx);
}

void tcp_fastopen_destroy_cipher(struct sock *sk)
{
        struct tcp_fastopen_context *ctx;

        ctx = rcu_dereference_protected(
                        inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);
        if (!ctx)
                return;

        call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);
}

void tcp_fastopen_ctx_destroy(struct net *net)
{
        struct tcp_fastopen_context *ctxt;

        ctxt = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, NULL);

        if (ctxt)
                call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
}

int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
                              void *primary_key, void *backup_key)
{
        struct tcp_fastopen_context *ctx, *octx;
        struct fastopen_queue *q;
        int err = 0;

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx) {
                err = -ENOMEM;
                goto out;
        }

        ctx->key[0].key[0] = get_unaligned_le64(primary_key);
        ctx->key[0].key[1] = get_unaligned_le64(primary_key + 8);
        if (backup_key) {
                ctx->key[1].key[0] = get_unaligned_le64(backup_key);
                ctx->key[1].key[1] = get_unaligned_le64(backup_key + 8);
                ctx->num = 2;
        } else {
                ctx->num = 1;
        }

        if (sk) {
                q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
                octx = xchg((__force struct tcp_fastopen_context **)&q->ctx, ctx);
        } else {
                octx = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, ctx);
        }

        if (octx)
                call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
out:
        return err;
}
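
/* Illustrative sketch (not part of the original file): the two key slots
 * above allow hitless key rotation.  A hypothetical caller rolling the
 * per-netns key would install the new key as primary and keep the old one
 * as backup, so cookies minted under the old key still validate:
 *
 *      u8 new_key[TCP_FASTOPEN_KEY_LENGTH];    // hypothetical buffer
 *
 *      get_random_bytes(new_key, sizeof(new_key));
 *      tcp_fastopen_reset_cipher(net, NULL, new_key, old_key);
 *
 * where "old_key" is assumed to hold the previously installed primary key.
 */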

int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
                            u64 *key)
{
        struct tcp_fastopen_context *ctx;
        int n_keys = 0, i;

        rcu_read_lock();
        if (icsk)
                ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
        else
                ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
        if (ctx) {
                n_keys = tcp_fastopen_context_len(ctx);
                for (i = 0; i < n_keys; i++) {
                        put_unaligned_le64(ctx->key[i].key[0], key + (i * 2));
                        put_unaligned_le64(ctx->key[i].key[1], key + (i * 2) + 1);
                }
        }
        rcu_read_unlock();

        return n_keys;
}

static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
                                             struct sk_buff *syn,
                                             const siphash_key_t *key,
                                             struct tcp_fastopen_cookie *foc)
{
        BUILD_BUG_ON(TCP_FASTOPEN_COOKIE_SIZE != sizeof(u64));

        if (req->rsk_ops->family == AF_INET) {
                const struct iphdr *iph = ip_hdr(syn);

                foc->val[0] = cpu_to_le64(siphash(&iph->saddr,
                                          sizeof(iph->saddr) +
                                          sizeof(iph->daddr),
                                          key));
                foc->len = TCP_FASTOPEN_COOKIE_SIZE;
                return true;
        }
#if IS_ENABLED(CONFIG_IPV6)
        if (req->rsk_ops->family == AF_INET6) {
                const struct ipv6hdr *ip6h = ipv6_hdr(syn);

                foc->val[0] = cpu_to_le64(siphash(&ip6h->saddr,
                                          sizeof(ip6h->saddr) +
                                          sizeof(ip6h->daddr),
                                          key));
                foc->len = TCP_FASTOPEN_COOKIE_SIZE;
                return true;
        }
#endif
        return false;
}

/* Generate the fastopen cookie by applying SipHash to both the source and
 * destination addresses.
 */
static void tcp_fastopen_cookie_gen(struct sock *sk,
                                    struct request_sock *req,
                                    struct sk_buff *syn,
                                    struct tcp_fastopen_cookie *foc)
{
        struct tcp_fastopen_context *ctx;

        rcu_read_lock();
        ctx = tcp_fastopen_get_ctx(sk);
        if (ctx)
                __tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[0], foc);
        rcu_read_unlock();
}
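
/* Worked example (illustrative, not part of the original file): for an IPv4
 * SYN from 192.0.2.1 to 198.51.100.2, the cookie returned in the SYN-ACK is
 *
 *      cookie = siphash(saddr || daddr, key[0])
 *
 * i.e. SipHash over the 8 contiguous bytes starting at iph->saddr, keyed
 * with the primary key, stored little-endian and truncated to
 * TCP_FASTOPEN_COOKIE_SIZE (8) bytes.  Ports are not mixed in, so the same
 * cookie is valid for any connection between the same pair of hosts.
 */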

/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
                return;

        skb = skb_clone(skb, GFP_ATOMIC);
        if (!skb)
                return;

        skb_dst_drop(skb);
        /* segs_in has been initialized to 1 in tcp_create_openreq_child().
         * Hence, reset segs_in to 0 before calling tcp_segs_in()
         * to avoid double counting. Also, tcp_segs_in() expects
         * skb->len to include the tcp_hdrlen. Hence, it should
         * be called before __skb_pull().
         */
        tp->segs_in = 0;
        tcp_segs_in(tp, skb);
        __skb_pull(skb, tcp_hdrlen(skb));
        sk_forced_mem_schedule(sk, skb->truesize);
        skb_set_owner_r(skb, sk);

        TCP_SKB_CB(skb)->seq++;
        TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

        tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
        __skb_queue_tail(&sk->sk_receive_queue, skb);
        tp->syn_data_acked = 1;

        /* u64_stats_update_begin(&tp->syncp) is not needed here,
         * as we certainly are not changing the upper 32bit value (0).
         */
        tp->bytes_received = skb->len;

        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
                tcp_fin(sk);
}

/* returns 0 - no key match, 1 for primary, 2 for backup */
static int tcp_fastopen_cookie_gen_check(struct sock *sk,
                                         struct request_sock *req,
                                         struct sk_buff *syn,
                                         struct tcp_fastopen_cookie *orig,
                                         struct tcp_fastopen_cookie *valid_foc)
{
        struct tcp_fastopen_cookie search_foc = { .len = -1 };
        struct tcp_fastopen_cookie *foc = valid_foc;
        struct tcp_fastopen_context *ctx;
        int i, ret = 0;

        rcu_read_lock();
        ctx = tcp_fastopen_get_ctx(sk);
        if (!ctx)
                goto out;
        for (i = 0; i < tcp_fastopen_context_len(ctx); i++) {
                __tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[i], foc);
                if (tcp_fastopen_cookie_match(foc, orig)) {
                        ret = i + 1;
                        goto out;
                }
                foc = &search_foc;
        }
out:
        rcu_read_unlock();
        return ret;
}

static struct sock *tcp_fastopen_create_child(struct sock *sk,
                                              struct sk_buff *skb,
                                              struct request_sock *req)
{
        struct tcp_sock *tp;
        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
        struct sock *child;
        bool own_req;

        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
                                                         NULL, &own_req);
        if (!child)
                return NULL;

        spin_lock(&queue->fastopenq.lock);
        queue->fastopenq.qlen++;
        spin_unlock(&queue->fastopenq.lock);

        /* Initialize the child socket. Have to fix some values to take
         * into account that the child is a Fast Open socket and is created
         * only out of the bits carried in the SYN packet.
         */
        tp = tcp_sk(child);

        rcu_assign_pointer(tp->fastopen_rsk, req);
        tcp_rsk(req)->tfo_listener = true;

        /* RFC1323: The window in SYN & SYN/ACK segments is never
         * scaled. So correct it appropriately.
         */
        tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
        tp->max_window = tp->snd_wnd;

        /* Activate the retrans timer so that SYNACK can be retransmitted.
         * The request socket is not added to the ehash
         * because it's been added to the accept queue directly.
         */
        req->timeout = tcp_timeout_init(child);
        inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
                                  req->timeout, TCP_RTO_MAX);

        refcount_set(&req->rsk_refcnt, 2);

        /* Now finish processing the fastopen child socket. */
        tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, skb);

        tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

        tcp_fastopen_add_skb(child, skb);

        tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
        tp->rcv_wup = tp->rcv_nxt;
        /* tcp_conn_request() is sending the SYNACK,
         * and queues the child into the listener accept queue.
         */
        return child;
}

static bool tcp_fastopen_queue_check(struct sock *sk)
{
        struct fastopen_queue *fastopenq;
        int max_qlen;

        /* Make sure the listener has enabled fastopen, and we don't
         * exceed the max # of pending TFO requests allowed before trying
         * to validate the cookie, in order to avoid burning CPU cycles
         * unnecessarily.
         *
         * XXX (TFO) - The implication of checking the max_qlen before
         * processing a cookie request is that clients can't differentiate
         * between qlen overflow causing Fast Open to be disabled
         * temporarily vs a server not supporting Fast Open at all.
         */
        fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
        max_qlen = READ_ONCE(fastopenq->max_qlen);
        if (max_qlen == 0)
                return false;

        if (fastopenq->qlen >= max_qlen) {
                struct request_sock *req1;

                spin_lock(&fastopenq->lock);
                req1 = fastopenq->rskq_rst_head;
                if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
                        __NET_INC_STATS(sock_net(sk),
                                        LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
                        spin_unlock(&fastopenq->lock);
                        return false;
                }
                fastopenq->rskq_rst_head = req1->dl_next;
                fastopenq->qlen--;
                spin_unlock(&fastopenq->lock);
                reqsk_put(req1);
        }
        return true;
}
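
/* Illustrative userspace sketch (not part of the original file): the
 * max_qlen checked above is what a server configures with the TCP_FASTOPEN
 * listener socket option, i.e. how many not-yet-completed TFO connections
 * may be pending on the listener:
 *
 *      int qlen = 16;  // hypothetical limit on pending TFO requests
 *
 *      setsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
 *      listen(listen_fd, SOMAXCONN);
 *
 * "listen_fd" is assumed to be a bound TCP socket; error handling omitted.
 */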

static bool tcp_fastopen_no_cookie(const struct sock *sk,
                                   const struct dst_entry *dst,
                                   int flag)
{
        return (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & flag) ||
               tcp_sk(sk)->fastopen_no_cookie ||
               (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
}
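
/* Illustrative note (not part of the original file): the flag tested above
 * comes from the net.ipv4.tcp_fastopen sysctl bitmap (see the TFO_* defines
 * in include/net/tcp.h).  For example, a server willing to accept data in
 * the SYN without any cookie could, under that assumption, be configured
 * with
 *
 *      sysctl -w net.ipv4.tcp_fastopen=0x202
 *
 * i.e. TFO_SERVER_ENABLE (0x2) | TFO_SERVER_COOKIE_NOT_REQD (0x200), which
 * makes this helper return true on the passive side.
 */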

/* Returns the fastopen child socket if we should perform Fast Open on the
 * SYN, or NULL otherwise. The cookie (foc) may be updated and returned to
 * the client in the SYN-ACK later, e.g. for a Fast Open cookie request
 * (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
                              struct request_sock *req,
                              struct tcp_fastopen_cookie *foc,
                              const struct dst_entry *dst)
{
        bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
        int tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
        struct tcp_fastopen_cookie valid_foc = { .len = -1 };
        struct sock *child;
        int ret = 0;

        if (foc->len == 0) /* Client requests a cookie */
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

        if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
              (syn_data || foc->len >= 0) &&
              tcp_fastopen_queue_check(sk))) {
                foc->len = -1;
                return NULL;
        }

        if (tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
                goto fastopen;

        if (foc->len == 0) {
                /* Client requests a cookie. */
                tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc);
        } else if (foc->len > 0) {
                ret = tcp_fastopen_cookie_gen_check(sk, req, skb, foc,
                                                    &valid_foc);
                if (!ret) {
                        NET_INC_STATS(sock_net(sk),
                                      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
                } else {
                        /* Cookie is valid. Create a (full) child socket to
                         * accept the data in SYN before returning a SYN-ACK to
                         * ack the data. If we fail to create the socket, fall
                         * back and ack the ISN only, but include the same
                         * cookie.
                         *
                         * Note: Data-less SYN with valid cookie is allowed to
                         * send data in SYN_RECV state.
                         */
fastopen:
                        child = tcp_fastopen_create_child(sk, skb, req);
                        if (child) {
                                if (ret == 2) {
                                        valid_foc.exp = foc->exp;
                                        *foc = valid_foc;
                                        NET_INC_STATS(sock_net(sk),
                                                      LINUX_MIB_TCPFASTOPENPASSIVEALTKEY);
                                } else {
                                        foc->len = -1;
                                }
                                NET_INC_STATS(sock_net(sk),
                                              LINUX_MIB_TCPFASTOPENPASSIVE);
                                return child;
                        }
                        NET_INC_STATS(sock_net(sk),
                                      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
                }
        }
        valid_foc.exp = foc->exp;
        *foc = valid_foc;
        return NULL;
}

bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
                               struct tcp_fastopen_cookie *cookie)
{
        const struct dst_entry *dst;

        tcp_fastopen_cache_get(sk, mss, cookie);

        /* Firewall blackhole issue check */
        if (tcp_fastopen_active_should_disable(sk)) {
                cookie->len = -1;
                return false;
        }

        dst = __sk_dst_get(sk);

        if (tcp_fastopen_no_cookie(sk, dst, TFO_CLIENT_NO_COOKIE)) {
                cookie->len = -1;
                return true;
        }
        if (cookie->len > 0)
                return true;
        tcp_sk(sk)->fastopen_client_fail = TFO_COOKIE_UNAVAILABLE;
        return false;
}

/* This function checks if we want to defer sending SYN until the first
 * write(). We defer under the following conditions:
 * 1. fastopen_connect sockopt is set
 * 2. we have a valid cookie
 * Return value: return true if we want to defer until application writes data
 *               return false if we want to send out SYN immediately
 */
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
        struct tcp_fastopen_cookie cookie = { .len = 0 };
        struct tcp_sock *tp = tcp_sk(sk);
        u16 mss;

        if (tp->fastopen_connect && !tp->fastopen_req) {
                if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
                        inet_set_bit(DEFER_CONNECT, sk);
                        return true;
                }

                /* Alloc fastopen_req in order for the FO option to be
                 * included in the SYN.
                 */
                tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
                                           sk->sk_allocation);
                if (tp->fastopen_req)
                        tp->fastopen_req->cookie = cookie;
                else
                        *err = -ENOBUFS;
        }
        return false;
}
EXPORT_SYMBOL(tcp_fastopen_defer_connect);
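
/* Illustrative userspace sketch (not part of the original file): the defer
 * path above backs the TCP_FASTOPEN_CONNECT socket option.  A client would
 * typically do something like:
 *
 *      int one = 1;
 *
 *      setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one));
 *      connect(fd, (struct sockaddr *)&addr, sizeof(addr)); // returns 0, SYN deferred
 *      send(fd, buf, len, 0);  // SYN goes out here, with data if a cookie is cached
 *
 * "fd", "addr", "buf" and "len" are hypothetical; error handling omitted.
 */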

/*
 * The following code block is to deal with middle box issues with TFO:
 * Middlebox firewall issues can potentially cause server's data being
 * blackholed after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 *   1. client side TFO socket receives out of order FIN
 *   2. client side TFO socket receives out of order RST
 *   3. client side TFO socket has timed out three times consecutively during
 *      or after the handshake
 * We disable active side TFO globally for 1hr at first. Then if it
 * happens again, we disable it for 2h, then 4h, 8h, ...
 * And we reset the timeout back to 1hr when we see a successful active
 * TFO connection with data exchanges.
 */
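
/* Illustrative note (not part of the original file): this logic is armed by
 * the net.ipv4.tcp_fastopen_blackhole_timeout_sec sysctl, which sets the
 * initial disable period in seconds (0 turns the detection off entirely).
 * For instance, an administrator could opt in with
 *
 *      sysctl -w net.ipv4.tcp_fastopen_blackhole_timeout_sec=3600
 *
 * to get the 1h/2h/4h/... behaviour described above.
 */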

/* Disable active TFO and record current jiffies and
 * tfo_active_disable_times
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
        struct net *net = sock_net(sk);

        if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout))
                return;

        /* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */
        WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies);

        /* Paired with smp_rmb() in tcp_fastopen_active_should_disable().
         * We want net->ipv4.tfo_active_disable_stamp to be updated first.
         */
        smp_mb__before_atomic();
        atomic_inc(&net->ipv4.tfo_active_disable_times);

        NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
}

/* Calculate the timeout for TFO active disable.
 * Return true if we are still in the active TFO disable period.
 * Return false if the timeout has already expired and we should use active TFO.
 */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
        unsigned int tfo_bh_timeout =
                READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout);
        unsigned long timeout;
        int tfo_da_times;
        int multiplier;

        if (!tfo_bh_timeout)
                return false;

        tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
        if (!tfo_da_times)
                return false;

        /* Paired with smp_mb__before_atomic() in tcp_fastopen_active_disable() */
        smp_rmb();

        /* Limit timeout to max: 2^6 * initial timeout */
        multiplier = 1 << min(tfo_da_times - 1, 6);

        /* Paired with the WRITE_ONCE() in tcp_fastopen_active_disable(). */
        timeout = READ_ONCE(sock_net(sk)->ipv4.tfo_active_disable_stamp) +
                  multiplier * tfo_bh_timeout * HZ;
        if (time_before(jiffies, timeout))
                return true;

        /* Mark check bit so we can check for successful active TFO
         * condition and reset tfo_active_disable_times
         */
        tcp_sk(sk)->syn_fastopen_ch = 1;
        return false;
}
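
/* Worked example (illustrative, not part of the original file): with a base
 * timeout of 3600 seconds, consecutive blackhole detections disable active
 * TFO for
 *
 *      (1 << min(tfo_da_times - 1, 6)) * 3600 s  =  1h, 2h, 4h, ... up to 64h
 *
 * after which this helper returns false again, and a successful data
 * exchange resets the counter (see tcp_fastopen_active_disable_ofo_check()
 * below).
 */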

/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data is received.
 * Also check if we can reset tfo_active_disable_times if data is
 * received successfully on a marked active TFO socket opened on
 * a non-loopback interface.
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct dst_entry *dst;
        struct sk_buff *skb;

        if (!tp->syn_fastopen)
                return;

        if (!tp->data_segs_in) {
                skb = skb_rb_first(&tp->out_of_order_queue);
                if (skb && !skb_rb_next(skb)) {
                        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
                                tcp_fastopen_active_disable(sk);
                                return;
                        }
                }
        } else if (tp->syn_fastopen_ch &&
                   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
                dst = sk_dst_get(sk);
                if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
                        atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
                dst_release(dst);
        }
}

void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
{
        u32 timeouts = inet_csk(sk)->icsk_retransmits;
        struct tcp_sock *tp = tcp_sk(sk);

        /* Broken middle-boxes may black-hole Fast Open connections during or
         * even after the handshake. Be extremely conservative and pause
         * Fast Open globally after hitting the third consecutive timeout or
         * exceeding the configured timeout limit.
         */
        if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
            (timeouts == 2 || (timeouts < 2 && expired))) {
                tcp_fastopen_active_disable(sk);
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
        }
}