/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 */
#define FASTRETRANS_DEBUG 1
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/indirect_call_wrapper.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock_reuseport.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/mptcp.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/bpf-cgroup.h>
#include <linux/siphash.h>
extern struct inet_hashinfo tcp_hashinfo;

DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
int tcp_orphan_count_sum(void);

void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	L1_CACHE_ALIGN(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
#define TCP_MIN_SND_MSS		48
#define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U
/* The initial MTU to use for probing */
#define TCP_BASE_MSS		1024

/* probing interval, default to 10 minutes as per RFC4821 */
#define TCP_PROBE_INTERVAL	600

/* Specify interval when tcp mtu probing will stop */
#define TCP_PROBE_THRESHOLD	8

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* Maximal number of window scale according to RFC1323 */
#define TCP_MAX_WSCALE		14U

#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400
#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 6	/* This is how many retries are done
				 * when active opening a connection.
				 * RFC1122 says the minimum retry MUST
				 * be at least 180secs. Nevertheless
				 * this value corresponds to 63secs
				 * of retransmission with the current
				 * initial RTO.
				 */

#define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
				 * when passive opening a connection.
				 * This corresponds to 31secs of
				 * retransmission with the current
				 * initial RTO.
				 */
#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
                                 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */
#define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */
#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value */
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */
#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes */
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_MPTCP		30	/* Multipath TCP (RFC6824) */
#define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
#define TCPOPT_EXP		254	/* Experimental */
/* Magic number to be after the option value for sharing TCP
 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 */
#define TCPOPT_FASTOPEN_MAGIC	0xF989
#define TCPOPT_SMC_MAGIC	0xE2D4C3D9
#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18
#define TCPOLEN_FASTOPEN_BASE	2
#define TCPOLEN_EXP_FASTOPEN_BASE	4
#define TCPOLEN_EXP_SMC_BASE	6

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4
#define TCPOLEN_EXP_SMC_BASE_ALIGNED	8
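
/* Worked example (illustrative): with MAX_TCP_OPTION_SPACE = 40 bytes,
 * a segment already carrying an aligned timestamp (12 bytes) has
 * 40 - 12 = 28 bytes left, i.e. TCPOLEN_SACK_BASE_ALIGNED (4) plus
 * 3 * TCPOLEN_SACK_PERBLOCK (24): at most 3 SACK blocks fit per ACK.
 */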
/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6	/* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per RFC6928 */
#define TCP_INIT_CWND		10

/* Bit Flags for sysctl_tcp_fastopen */
#define	TFO_CLIENT_ENABLE	1
#define	TFO_SERVER_ENABLE	2
#define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */

/* Accept SYN data w/o any cookie option */
#define	TFO_SERVER_COOKIE_NOT_REQD	0x200

/* Force enable TFO on all listeners, i.e., not requiring the
 * TCP_FASTOPEN socket option.
 */
#define	TFO_SERVER_WO_SOCKOPT1	0x400

/* sysctl variables for tcp */
extern int sysctl_tcp_max_orphans;
extern long sysctl_tcp_mem[3];

#define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */
#define TCP_RACK_STATIC_REO_WND  0x2 /* Use static RACK reo wnd */
#define TCP_RACK_NO_DUPTHRESH    0x4 /* Do not use DUPACK threshold in RACK */

extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern unsigned long tcp_memory_pressure;
/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
		return true;

	return READ_ONCE(tcp_memory_pressure);
}
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */
static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1) 	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
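
/* Worked example (illustrative): modulo-2^32 arithmetic keeps these
 * comparisons correct across sequence number wraparound:
 *
 *	before(0xfffffff0, 0x00000010) is true, since
 *		(__s32)(0xfffffff0 - 0x00000010) == -0x20 < 0
 *	between(0x00000005, 0xfffffff0, 0x00000010) is true, since
 *		0x10 - 0xfffffff0 == 0x20 >= 0x05 - 0xfffffff0 == 0x15
 */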
static inline bool tcp_out_of_memory(struct sock *sk)
{
	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
		return true;
	return false;
}

void sk_forced_mem_schedule(struct sock *sk, int size);

bool tcp_check_oom(struct sock *sk, int shift);
extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

void tcp_tasklet_init(void);

int tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

int tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);

void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb);
int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
		 int flags);
int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
			size_t size, int flags);
struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
			       struct page *page, int offset, size_t *size);
ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
			 size_t size, int flags);
int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
	      int size_goal);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags);
void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4
#define	TCP_ECN_SEEN		8
enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
					      struct sk_buff *skb,
					      const struct tcphdr *th);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req, bool fastopen,
			   bool *req_stolen);
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void __tcp_close(struct sock *sk, long timeout);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
__poll_t tcp_poll(struct file *file, struct socket *sock,
		  struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen);
bool tcp_bpf_bypass_getsockopt(int level, int optname);
int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		   unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
		int flags, int *addr_len);
int tcp_set_rcvlowat(struct sock *sk, int val);
int tcp_set_window_clamp(struct sock *sk, int val);
void tcp_update_recv_tstamps(struct sk_buff *skb,
			     struct scm_timestamping_internal *tss);
void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
			struct scm_timestamping_internal *tss);
void tcp_data_ready(struct sock *sk);
#ifdef CONFIG_MMU
int tcp_mmap(struct file *file, struct socket *sock,
	     struct vm_area_struct *vma);
#endif
void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
		       struct tcp_options_received *opt_rx,
		       int estab, struct tcp_fastopen_cookie *foc);
const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
/*
 *	BPF SKB-less helpers
 */
u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
			  const struct tcp_request_sock_ops *af_ops,
			  struct sock *sk, struct tcphdr *th);
/*
 *	TCP v4 functions exported for the inet6 API
 */

void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq, bool abort);
void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb);
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req);
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
enum tcp_synack_type {
	TCP_SYNACK_NORMAL,
	TCP_SYNACK_FASTOPEN,
	TCP_SYNACK_COOKIE,
};
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
				struct request_sock *req,
				struct tcp_fastopen_cookie *foc,
				enum tcp_synack_type synack_type,
				struct sk_buff *syn_skb);
int tcp_disconnect(struct sock *sk, int flags);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
				 struct request_sock *req,
				 struct dst_entry *dst, u32 tsoff);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
		      u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
					    const struct tcp_request_sock_ops *af_ops,
					    struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
/* Syncookies use a monotonic timer which increments every 60 seconds.
 * This counter is used both as a hash input and partially encoded into
 * the cookie value. A cookie is only validated further if the delta
 * between the current counter value and the encoded one is less than this,
 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
 * the counter advances immediately after a cookie is generated).
 */
#define MAX_SYNCOOKIE_AGE	2
#define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
#define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)

/* syncookies: remember time of last synqueue overflow
 * But do not dirty this field too often (once per second is enough)
 * It is racy as we do not hold a lock, but race is very minor.
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			if (!time_between32(now, last_overflow,
					    last_overflow + HZ))
				WRITE_ONCE(reuse->synq_overflow_ts, now);
			return;
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
	if (!time_between32(now, last_overflow, last_overflow + HZ))
		WRITE_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp, now);
}
/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			return !time_between32(now, last_overflow - HZ,
					       last_overflow +
					       TCP_SYNCOOKIE_VALID);
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);

	/* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
	 * then we're under synflood. However, we have to use
	 * 'last_overflow - HZ' as lower bound. That's because a concurrent
	 * tcp_synq_overflow() could update .ts_recent_stamp after we read
	 * jiffies but before we store .ts_recent_stamp into last_overflow,
	 * which could lead to rejecting a valid syncookie.
	 */
	return !time_between32(now, last_overflow - HZ,
			       last_overflow + TCP_SYNCOOKIE_VALID);
}
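
/* Timeline sketch (illustrative, assuming HZ=1000): if the stamp was last
 * written at jiffies == 5000, tcp_synq_overflow() will not dirty it again
 * before jiffies == 6000, and tcp_synq_no_recent_overflow() reports a
 * recent overflow for jiffies in [4000, 5000 + TCP_SYNCOOKIE_VALID]; the
 * extra HZ of slack below the stamp absorbs the unlocked, racy update
 * described above.
 */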
static inline u32 tcp_cookie_time(void)
{
	u64 val = get_jiffies_64();

	do_div(val, TCP_SYNCOOKIE_PERIOD);
	return val;
}
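
/* Worked example (illustrative): tcp_cookie_time() counts 60-second
 * periods since boot, so jiffies_64 == 90 * HZ yields 1. Per the comment
 * above, a cookie whose encoded counter lags the current value by
 * MAX_SYNCOOKIE_AGE or more periods is no longer validated.
 */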
u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
			      u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
u64 cookie_init_timestamp(struct request_sock *req, u64 now);
bool cookie_timestamp_decode(const struct net *net,
			     struct tcp_options_received *opt);
bool cookie_ecn_ok(const struct tcp_options_received *opt,
		   const struct net *net, const struct dst_entry *dst);

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
		      __u32 cookie);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);

u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
void tcp_enter_recovery(struct sock *sk, bool ece_ack);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
enum tcp_queue {
	TCP_FRAG_IN_WRITE_QUEUE,
	TCP_FRAG_IN_RTX_QUEUE,
};
int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
		 struct sk_buff *skb, u32 len,
		 unsigned int mss_now, gfp_t gfp);
void tcp_send_probe0(struct sock *);
void tcp_send_partial(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
			     const struct sk_buff *next_skb);

/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_reset(struct sock *sk, struct sk_buff *skb);
void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
void tcp_fin(struct sock *sk);
void tcp_check_space(struct sock *sk);
/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
		__sock_put(sk);

	if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
		__sock_put(sk);

	inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);
u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);
/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window > TCP_MSS_DEFAULT)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
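
/* Worked example (illustrative): if max_window == 65535 (greater than
 * TCP_MSS_DEFAULT), cutoff is 32767, so a 64KB TSO packet is clamped to
 * max(32767, 68 - tcp_header_len) bytes: half the best window ever seen.
 */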
/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);
static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}
static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	/* mptcp hooks are only on the slow path */
	if (sk_is_mptcp((struct sock *)tp))
		return;

	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}
/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = inet_csk(sk)->icsk_rto_min;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

static inline u32 tcp_rto_min_us(struct sock *sk)
{
	return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
	return dst_metric_locked(dst, RTAX_CC_ALGO);
}

/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
	return minmax_get(&tp->rtt_min);
}
/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result. The caller does these things
 * if necessary. This is a "raw" window selection.
 */
u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);
/* TCP uses 32bit jiffies to save some space.
 * Note that this is different from tcp_time_stamp, which
 * historically has been the same until linux-4.13.
 */
#define tcp_jiffies32 ((u32)jiffies)

/*
 * Deliver a 32bit value for TCP timestamp option (RFC 7323)
 * It is no longer tied to jiffies, but to 1 ms clock.
 * Note: double check if you want to use tcp_jiffies32 instead of this.
 */
#define TCP_TS_HZ	1000

static inline u64 tcp_clock_ns(void)
{
	return ktime_get_ns();
}

static inline u64 tcp_clock_us(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
}

/* This should only be used in contexts where tp->tcp_mstamp is up to date */
static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
{
	return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
}

/* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
static inline u32 tcp_ns_to_ts(u64 ns)
{
	return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
}

/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
static inline u32 tcp_time_stamp_raw(void)
{
	return tcp_ns_to_ts(tcp_clock_ns());
}
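
/* Note on units (illustrative): 2 seconds after boot, tcp_clock_ns()
 * returns 2000000000, tcp_clock_us() returns 2000000, and
 * tcp_time_stamp_raw() returns 2000, since TCP_TS_HZ is 1000
 * (1 ms resolution for TSval).
 */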
void tcp_mstamp_refresh(struct tcp_sock *tp);

static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
{
	return max_t(s64, t1 - t0, 0);
}

static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
	return tcp_ns_to_ts(skb->skb_mstamp_ns);
}

/* provide the departure time in us unit */
static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
{
	return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
}
#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here too.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	union {
		/* Note : tcp_tw_isn is used in input path only
		 *	  (isn chosen by tcp_timewait_state_process())
		 *
		 *	  tcp_gso_segs/size are used in write queue only,
		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
		 */
		__u32		tcp_tw_isn;
		struct {
			u16	tcp_gso_segs;
			u16	tcp_gso_size;
		};
	};
	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/

	__u8		sacked;		/* State flags for SACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/
#define TCPCB_REPAIRED		0x10	/* SKB repaired (no skb_mstamp_ns)	*/
#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
				TCPCB_REPAIRED)

	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
	__u8		txstamp_ack:1,	/* Record TX timestamp for ack? */
			eor:1,		/* Is skb MSG_EOR marked? */
			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
			unused:5;
	__u32		ack_seq;	/* Sequence number ACK'd	*/
	union {
		struct {
			/* There is space for up to 24 bytes */
			__u32 in_flight:30,/* Bytes in flight at transmit */
			      is_app_limited:1, /* cwnd not fully used? */
			      unused:1;
			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
			__u32 delivered;
			/* start of send pipeline phase */
			u64 first_tx_mstamp;
			/* when we reached the "delivered" count */
			u64 delivered_mstamp;
		} tx;   /* only used for outgoing skbs */
		union {
			struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_skb_parm	h6;
#endif
		} header;	/* For incoming skbs */
	};
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
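
/* Usage sketch (illustrative): TCP-layer code reads per-packet state
 * through this accessor instead of touching skb->cb[] directly, e.g.
 *
 *	u32 seq = TCP_SKB_CB(skb)->seq;
 *	bool ever_retrans = TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS;
 */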
extern const struct inet_connection_sock_af_ops ipv4_specific;
#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
 * as TCP moves IP6CB into a different location in skb->cb[]
 */
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->header.h6.iif;
}

static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);

	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v6_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
		return TCP_SKB_CB(skb)->header.h6.iif;
#endif
	return 0;
}

extern const struct inet_connection_sock_af_ops ipv6_specific;

INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb));

#endif
/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v4_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
		return TCP_SKB_CB(skb)->header.h4.iif;
#endif
	return 0;
}

/* Due to TSO, an SKB can be composed of multiple actual
 * packets. To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_segs;
}

static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}

static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}

/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_size;
}

static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
{
	return likely(!TCP_SKB_CB(skb)->eor);
}

static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
					const struct sk_buff *from)
{
	return likely(tcp_skb_can_collapse_to(to) &&
		      mptcp_skb_can_collapse(to, from));
}
/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
enum tcp_ca_ack_event_flags {
	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CA_UNSPEC	0

/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED 0x1
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN	0x2
#define TCP_CONG_MASK	(TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)
/* A rate sample measures the number of (original/retransmitted) data
 * packets delivered "delivered" over an interval of time "interval_us".
 * The tcp_rate.c code fills in the rate sample, and congestion
 * control modules that define a cong_control function to run at the end
 * of ACK processing can optionally choose to consult this sample when
 * setting cwnd and pacing rate.
 * A sample is invalid if "delivered" or "interval_us" is negative.
 */
struct rate_sample {
	u64  prior_mstamp; /* starting timestamp for interval */
	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
	s32  delivered;		/* number of packets delivered over interval */
	long interval_us;	/* time for tp->delivered to incr "delivered" */
	u32 snd_interval_us;	/* snd interval for delivered packets */
	u32 rcv_interval_us;	/* rcv interval for delivered packets */
	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
	int  losses;		/* number of packets marked lost upon ACK */
	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
	u32  prior_in_flight;	/* in flight before this ACK */
	u32  last_end_seq;	/* end_seq of most recently ACKed packet */
	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
	bool is_retrans;	/* is sample from retransmission? */
	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
};
struct tcp_congestion_ops {
/* fast path fields are put first to fill one cache line */

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);

	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);

	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);

	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);

	/* call when ack arrives (optional) */
	void (*in_ack_event)(struct sock *sk, u32 flags);

	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);

	/* override sysctl_tcp_min_tso_segs */
	u32 (*min_tso_segs)(struct sock *sk);

	/* call when packets are delivered to update cwnd and pacing rate,
	 * after all the ca_state processing. (optional)
	 */
	void (*cong_control)(struct sock *sk, const struct rate_sample *rs);

	/* new value of cwnd after loss (required) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
	u32 (*sndbuf_expand)(struct sock *sk);

/* control/slow paths put last */
	/* get info for inet_diag (optional) */
	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
			   union tcp_cc_info *info);

	char			name[TCP_CA_NAME_MAX];
	struct module		*owner;
	struct list_head	list;
	u32			key;
	u32			flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);
} ____cacheline_aligned_in_smp;
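
/* Minimal sketch (illustrative, not shipped code) of a congestion control
 * module built on this interface. It reuses the exported Reno helpers
 * declared below; a hypothetical "toy" module would register it from its
 * module init path:
 *
 *	static struct tcp_congestion_ops tcp_toy __read_mostly = {
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.undo_cwnd	= tcp_reno_undo_cwnd,
 *		.name		= "toy",
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init tcp_toy_init(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_toy);
 *	}
 */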
int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(struct net *net, const char *name);
void tcp_get_default_congestion_control(struct net *net, char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
			       bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

u32 tcp_reno_ssthresh(struct sock *sk);
u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;

struct tcp_congestion_ops *tcp_ca_find(const char *name);
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	return NULL;
}
#endif
static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}
/* From tcp_rate.c */
void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
			    struct rate_sample *rs);
void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
		  bool is_sack_reneg, struct rate_sample *rs);
void tcp_rate_check_app_limited(struct sock *sk);

static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}
/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return likely(tp->rx_opt.sack_ok);
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}
/* This determines how many packets are "in the network" to the best
 * of our knowledge. In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
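
/* Worked example (illustrative values): packets_out = 10, sacked_out = 3,
 * lost_out = 2, retrans_out = 1 gives left_out = 5 and
 * in_flight = 10 - 5 + 1 = 6 packets still presumed in the network.
 */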
#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
{
	return tp->snd_cwnd;
}

static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
{
	WARN_ON_ONCE((int)val <= 0);
	tp->snd_cwnd = val;
}

static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
	return tcp_snd_cwnd(tp) < tp->snd_ssthresh;
}

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
{
	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
	       (1 << inet_csk(sk)->icsk_ca_state);
}
/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is cwnd reduction phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_in_cwnd_reduction(sk))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tcp_snd_cwnd(tp) >> 1) +
			    (tcp_snd_cwnd(tp) >> 2)));
}
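
/* Note (illustrative): (cwnd >> 1) + (cwnd >> 2) is 3/4 of cwnd, so
 * outside of CWR/Recovery this returns max(snd_ssthresh, 3 * cwnd / 4),
 * e.g. cwnd = 40 gives 20 + 10 = 30.
 */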
/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

void tcp_enter_cwr(struct sock *sk);
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
	return 3;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}
/* We follow the spirit of RFC2861 to validate cwnd but implement a more
 * flexible approach. The RFC suggests cwnd should not be raised unless
 * it was fully used previously. And that's exactly what we do in
 * congestion avoidance mode. But in slow start we allow cwnd to grow
 * as long as the application has used half the cwnd.
 * Example :
 *    cwnd is 10 (IW10), but application sends 9 frames.
 *    We allow cwnd to reach 18 when all frames are ACKed.
 * This check is safe because it's as aggressive as slow start which already
 * risks 100% overshoot. The advantage is that we discourage application to
 * either send more filler packets or data to artificially blow up the cwnd
 * usage, and allow application-limited process to probe bw more aggressively.
 */
static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
	if (tcp_in_slow_start(tp))
		return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;

	return tp->is_cwnd_limited;
}
/* BBR congestion control needs pacing.
 * Same remark for SO_MAX_PACING_RATE.
 * sch_fq packet scheduler is efficiently handling pacing,
 * but is not always installed/used.
 * Return true if TCP stack should pace packets itself.
 */
static inline bool tcp_needs_internal_pacing(const struct sock *sk)
{
	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
}

/* Estimates in how many jiffies next packet for this flow can be sent.
 * Scheduling a retransmit timer too early would be silly.
 */
static inline unsigned long tcp_pacing_delay(const struct sock *sk)
{
	s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;

	return delay > 0 ? nsecs_to_jiffies(delay) : 0;
}
static inline void tcp_reset_xmit_timer(struct sock *sk,
					const int what,
					unsigned long when,
					const unsigned long max_when)
{
	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
				  max_when);
}

/* Something is really bad, we could not queue an additional packet,
 * because qdisc is full or receiver sent a 0 window, or we are paced.
 * We do not want to add fuel to the fire, or abort too early,
 * so make sure the timer we arm now is at least 200ms in the future,
 * regardless of current icsk_rto value (as it could be ~2ms)
 */
static inline unsigned long tcp_probe0_base(const struct sock *sk)
{
	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
}
/* Variant of inet_csk_rto_backoff() used for zero window probes */
static inline unsigned long tcp_probe0_when(const struct sock *sk,
					    unsigned long max_when)
{
	u8 backoff = min_t(u8, ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1,
			   inet_csk(sk)->icsk_backoff);
	u64 when = (u64)tcp_probe0_base(sk) << backoff;

	return (unsigned long)min_t(u64, when, max_when);
}
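
/* Worked example (illustrative, HZ=1000): TCP_RTO_MAX / TCP_RTO_MIN == 600,
 * so the shift is capped at ilog2(600) + 1 == 10; with icsk_backoff == 3
 * and a 200ms probe0 base, the next probe fires after
 * min(200ms << 3, max_when) == 1.6s.
 */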
static inline void tcp_check_probe_timer(struct sock *sk)
{
	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
				     tcp_probe0_base(sk), TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__skb_checksum_complete(skb);
}
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
int tcp_filter(struct sock *sk, struct sk_buff *skb);
void tcp_set_state(struct sock *sk, int state);
void tcp_done(struct sock *sk);
int tcp_abort(struct sock *sk, int err);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

void tcp_cwnd_restart(struct sock *sk, s32 delta);
static inline void tcp_slow_start_after_idle_check(struct sock *sk)
{
	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta;

	if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out ||
	    ca_ops->cong_control)
		return;
	delta = tcp_jiffies32 - tp->lsndtime;
	if (delta > inet_csk(sk)->icsk_rto)
		tcp_cwnd_restart(sk, delta);
}

/* Determine a window scaling and initial window to offer. */
void tcp_select_initial_window(const struct sock *sk, int __space,
			       __u32 mss, __u32 *rcv_wnd,
			       __u32 *window_clamp, int wscale_ok,
			       __u8 *rcv_wscale, __u32 init_rcv_wnd);
static inline int tcp_win_from_space(const struct sock *sk, int space)
{
	int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;

	return tcp_adv_win_scale <= 0 ?
		(space>>(-tcp_adv_win_scale)) :
		space - (space>>tcp_adv_win_scale);
}
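
/* Worked example (illustrative): with the default tcp_adv_win_scale == 1
 * and space == 65536, the advertised window is 65536 - (65536 >> 1) ==
 * 32768, reserving the other half for skb overhead; a scale <= 0 instead
 * advertises space >> (-scale).
 */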
/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
				  READ_ONCE(sk->sk_backlog.len) -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}

void tcp_cleanup_rbuf(struct sock *sk, int copied);
/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
 * If 87.5 % (7/8) of the space has been consumed, we want to override
 * SO_RCVLOWAT constraint, since we are receiving skbs with too small
 * len/truesize ratio.
 */
static inline bool tcp_rmem_pressure(const struct sock *sk)
{
	int rcvbuf, threshold;

	if (tcp_under_memory_pressure(sk))
		return true;

	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
	threshold = rcvbuf - (rcvbuf >> 3);

	return atomic_read(&sk->sk_rmem_alloc) > threshold;
}

static inline bool tcp_epollin_ready(const struct sock *sk, int target)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);

	if (avail <= 0)
		return false;

	return (avail >= target) || tcp_rmem_pressure(sk) ||
	       (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss);
}
extern void tcp_openreq_init_rwin(struct request_sock *req,
				  const struct sock *sk_listener,
				  const struct dst_entry *dst);

void tcp_enter_memory_pressure(struct sock *sk);
void tcp_leave_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
			  tcp_jiffies32 - tp->rcv_tstamp);
}
static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}
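
/* Note (illustrative): (rto << 2) - (rto >> 1) == 3.5 * RTO, so the
 * effective FIN_WAIT2 timeout never drops below 3.5 retransmission
 * timeouts, e.g. rto == 200ms enforces a 700ms floor.
 */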
static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
				  int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return true;
	if (unlikely(!time_before32(ktime_get_seconds(),
				    rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
		return true;
	/*
	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
	 * then following tcp messages have valid values. Ignore 0 value,
	 * or else 'negative' tsval might forbid us to accept their packets.
	 */
	if (!rx_opt->ts_recent)
		return true;
	return false;
}

static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
				   int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return false;

	/* RST segments are not recommended to carry timestamp,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, it is a mistake. It is necessary to understand the
	   reasons of this constraint to relax it: if peer reboots, clock
	   may go out-of-sync and half-open connections will not be reset.
	   Actually, the problem would not exist if all the implementations
	   followed the draft about maintaining clock via reboots.
	   Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && !time_before32(ktime_get_seconds(),
				  rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
		return true;
	return false;
}
bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
			  int mib_idx, u32 *last_oow_ack_time);

static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}
union tcp_md5_addr {
	struct in_addr		a4;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr		a6;
#endif
};

/* - key database */
struct tcp_md5sig_key {
	struct hlist_node	node;
	u8			keylen;
	u8			family; /* AF_INET or AF_INET6 */
	u8			prefixlen;
	u8			flags;
	union tcp_md5_addr	addr;
	int			l3index; /* set if key added with L3 scope */
	u8			key[TCP_MD5SIG_MAXKEYLEN];
	struct rcu_head		rcu;
};

/* - sock block */
struct tcp_md5sig_info {
	struct hlist_head	head;
	struct rcu_head		rcu;
};

/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if IS_ENABLED(CONFIG_IPV6)
	struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
	struct ahash_request	*md5_req;
	void			*scratch;
};
/* - functions */
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk, const struct sk_buff *skb);
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, int l3index, u8 flags,
		   const u8 *newkey, u8 newkeylen, gfp_t gfp);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, int l3index, u8 flags);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk);

#ifdef CONFIG_TCP_MD5SIG
#include <linux/jump_label.h>
extern struct static_key_false tcp_md5_needed;
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
					   const union tcp_md5_addr *addr,
					   int family);
static inline struct tcp_md5sig_key *
tcp_md5_do_lookup(const struct sock *sk, int l3index,
		  const union tcp_md5_addr *addr, int family)
{
	if (!static_branch_unlikely(&tcp_md5_needed))
		return NULL;
	return __tcp_md5_do_lookup(sk, l3index, addr, family);
}

#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *
tcp_md5_do_lookup(const struct sock *sk, int l3index,
		  const union tcp_md5_addr *addr, int family)
{
	return NULL;
}

#define tcp_twsk_md5_key(twsk)	NULL
#endif

bool tcp_alloc_md5sig_pool(void);

struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
static inline void tcp_put_md5sig_pool(void)
{
	local_bh_enable();
}

int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
			  unsigned int header_len);
int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
		     const struct tcp_md5sig_key *key);
/* From tcp_fastopen.c */
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie);
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp);
struct tcp_fastopen_request {
	/* Fast Open cookie. Size 0 means a cookie request */
	struct tcp_fastopen_cookie	cookie;
	struct msghdr			*data;  /* data in MSG_FASTOPEN */
	size_t				size;
	int				copied;	/* queued in tcp_connect() */
	struct ubuf_info		*uarg;
};
void tcp_free_fastopen_req(struct tcp_sock *tp);
void tcp_fastopen_destroy_cipher(struct sock *sk);
void tcp_fastopen_ctx_destroy(struct net *net);
int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
			      void *primary_key, void *backup_key);
int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
			    u64 *key);
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      const struct dst_entry *dst);
void tcp_fastopen_init_key_once(struct net *net);
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie);
bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
#define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
#define TCP_FASTOPEN_KEY_MAX 2
#define TCP_FASTOPEN_KEY_BUF_LENGTH \
	(TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)

/* Fastopen key context */
struct tcp_fastopen_context {
	siphash_key_t	key[TCP_FASTOPEN_KEY_MAX];
	int		num;
	struct rcu_head	rcu;
};

void tcp_fastopen_active_disable(struct sock *sk);
bool tcp_fastopen_active_should_disable(struct sock *sk);
void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
/* Caller needs to wrap with rcu_read_(un)lock() */
static inline
struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
{
	struct tcp_fastopen_context *ctx;

	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
	if (!ctx)
		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
	return ctx;
}

static inline
bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
			       const struct tcp_fastopen_cookie *orig)
{
	if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
	    orig->len == foc->len &&
	    !memcmp(orig->val, foc->val, foc->len))
		return true;
	return false;
}

static inline
int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
{
	return ctx->num;
}
/* Latencies incurred by various limits for a sender. They are
 * chronograph-like stats that are mutually exclusive.
 */
enum tcp_chrono {
	TCP_CHRONO_UNSPEC,
	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
	__TCP_CHRONO_MAX,
};

void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
/* This helper is needed, because skb->tcp_tsorted_anchor uses
 * the same memory storage as skb->destructor/_skb_refdst
 */
static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
{
	skb->destructor = NULL;
	skb->_skb_refdst = 0UL;
}

#define tcp_skb_tsorted_save(skb) {		\
	unsigned long _save = skb->_skb_refdst;	\
	skb->_skb_refdst = 0UL;

#define tcp_skb_tsorted_restore(skb)		\
	skb->_skb_refdst = _save;		\
}

void tcp_write_queue_purge(struct sock *sk);
static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
{
	return skb_rb_first(&sk->tcp_rtx_queue);
}

static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
{
	return skb_rb_last(&sk->tcp_rtx_queue);
}

static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)

static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}
/**
 * tcp_write_queue_empty - test if any payload (or FIN) is available in write queue
 * @sk: socket
 *
 * Since the write queue can have a temporary empty skb in it,
 * we must not use "return skb_queue_empty(&sk->sk_write_queue)"
 */
static inline bool tcp_write_queue_empty(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return tp->write_seq == tp->snd_nxt;
}
static inline bool tcp_rtx_queue_empty(const struct sock *sk)
{
	return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
}

static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
{
	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
}
static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_write_queue.next == skb)
		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
}
/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	tcp_skb_tsorted_anchor_cleanup(skb);
	__skb_unlink(skb, &sk->sk_write_queue);
}
void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);

static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
{
	tcp_skb_tsorted_anchor_cleanup(skb);
	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
}

static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
{
	list_del(&skb->tcp_tsorted_anchor);
	tcp_rtx_queue_unlink(skb, sk);
	sk_wmem_free_skb(sk, skb);
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
	if (tcp_send_head(sk)) {
		struct tcp_sock *tp = tcp_sk(sk);

		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
	}
}
/* Start sequence of the skb just after the highest skb with SACKed
 * bit, valid only if sacked_out > 0 or when the caller has ensured
 * validity by itself.
 */
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
	if (!tp->sacked_out)
		return tp->snd_una;

	if (tp->highest_sack == NULL)
		return tp->snd_nxt;

	return TCP_SKB_CB(tp->highest_sack)->seq;
}
static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
	tcp_sk(sk)->highest_sack = skb_rb_next(skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
	return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
	tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
}

/* Called when old skb is about to be deleted and replaced by new skb */
static inline void tcp_highest_sack_replace(struct sock *sk,
					    struct sk_buff *old,
					    struct sk_buff *new)
{
	if (old == tcp_highest_sack(sk))
		tcp_sk(sk)->highest_sack = new;
}
/* This helper checks if socket has IP_TRANSPARENT set */
static inline bool inet_sk_transparent(const struct sock *sk)
{
	switch (sk->sk_state) {
	case TCP_TIME_WAIT:
		return inet_twsk(sk)->tw_transparent;
	case TCP_NEW_SYN_RECV:
		return inet_rsk(inet_reqsk(sk))->no_srccheck;
	}
	return inet_sk(sk)->transparent;
}
/* Determines whether this is a thin stream (which may suffer from
 * increased latency). Used to trigger latency-reducing mechanisms.
 */
static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
{
	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
}
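/*
 * Editor's note: illustrative sketch. Callers use this predicate to gate
 * latency-reducing behaviour for interactive flows, e.g. the retransmit
 * timer keeps linear (non-exponential) backoff for thin streams when the
 * tcp_thin_linear_timeouts sysctl is enabled:
 *
 *	if (tcp_stream_is_thin(tp))
 *		;	// keep RTO linear instead of doubling it
 */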
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_ESTABLISHED,
};

void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
void tcp_seq_stop(struct seq_file *seq, void *v);

struct tcp_seq_afinfo {
	sa_family_t		family;
};

struct tcp_iter_state {
	struct seq_net_private	p;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, offset, sbucket, num;
	loff_t			last_pos;
};
extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

void tcp_v4_destroy_sock(struct sock *sk);

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features);
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
int tcp_gro_complete(struct sk_buff *skb);

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
}

bool tcp_stream_memory_free(const struct sock *sk, int wake);
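/*
 * Editor's note: illustrative sketch. With a lowat of, say, 128 kB, a
 * socket stops reporting itself writable once more than 128 kB of
 * already-queued data remain unsent, bounding per-socket write-queue
 * latency; roughly the test tcp_stream_memory_free() performs:
 *
 *	u32 notsent = tp->write_seq - tp->snd_nxt;
 *	bool writable = notsent < tcp_notsent_lowat(tp);
 */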
#ifdef CONFIG_PROC_FS
int tcp4_proc_init(void);
void tcp4_proc_exit(void);
#endif
int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
int tcp_conn_request(struct request_sock_ops *rsk_ops,
		     const struct tcp_request_sock_ops *af_ops,
		     struct sock *sk, struct sk_buff *skb);
/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
						const struct sock *addr_sk);
	int		(*calc_md5_hash)(char *location,
					 const struct tcp_md5sig_key *md5,
					 const struct sock *sk,
					 const struct sk_buff *skb);
	int		(*md5_parse)(struct sock *sk,
				     int optname,
				     sockptr_t optval,
				     int optlen);
#endif
};
struct tcp_request_sock_ops {
	u16 mss_clamp;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
						 const struct sock *addr_sk);
	int		(*calc_md5_hash) (char *location,
					  const struct tcp_md5sig_key *md5,
					  const struct sock *sk,
					  const struct sk_buff *skb);
#endif
#ifdef CONFIG_SYN_COOKIES
	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
				 __u16 *mss);
#endif
	struct dst_entry *(*route_req)(const struct sock *sk,
				       struct sk_buff *skb,
				       struct flowi *fl,
				       struct request_sock *req);
	u32 (*init_seq)(const struct sk_buff *skb);
	u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
			   struct flowi *fl, struct request_sock *req,
			   struct tcp_fastopen_cookie *foc,
			   enum tcp_synack_type synack_type,
			   struct sk_buff *syn_skb);
};
extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
#if IS_ENABLED(CONFIG_IPV6)
extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
#endif
#ifdef CONFIG_SYN_COOKIES
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 const struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	tcp_synq_overflow(sk);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
	return ops->cookie_init_seq(skb, mss);
}
#else
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 const struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	return 0;
}
#endif
int tcpv4_offload_init(void);

void tcp_v4_init(void);
void tcp_init(void);
/* tcp_recovery.c */
void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
				u32 reo_wnd);
extern bool tcp_rack_mark_lost(struct sock *sk);
extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
			     u64 xmit_time);
extern void tcp_rack_reo_timeout(struct sock *sk);
extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);
/* At how many usecs into the future should the RTO fire? */
static inline s64 tcp_rto_delta_us(const struct sock *sk)
{
	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
	u32 rto = inet_csk(sk)->icsk_rto;
	u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);

	return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
}
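/*
 * Editor's note: a worked example under assumed numbers. If the head of
 * the rtx queue was last (re)sent at t = 1,000,000 us, icsk_rto is 200 ms
 * (200,000 us) and tcp_mstamp is 1,150,000 us, the timer should fire in
 * 1,000,000 + 200,000 - 1,150,000 = 50,000 us; a negative result means
 * the RTO is already overdue.
 */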
/*
 * Save and compile IPv4 options, return a pointer to them
 */
static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
							 struct sk_buff *skb)
{
	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
	struct ip_options_rcu *dopt = NULL;

	if (opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
			kfree(dopt);
			dopt = NULL;
		}
	}
	return dopt;
}
/* locally generated TCP pure ACKs have skb->truesize == 2
 * (check tcp_send_ack() in net/ipv4/tcp_output.c )
 * This is much faster than dissecting the packet to find out.
 * (Think of GRE encapsulations, IPv4, IPv6, ...)
 */
static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
{
	return skb->truesize == 2;
}

static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
{
	skb->truesize = 2;
}
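/*
 * Editor's note: illustrative sketch. The ACK transmit path marks the
 * skb once, and later layers can then classify it with a single integer
 * compare instead of parsing headers:
 *
 *	skb_set_tcp_pure_ack(skb);	// when building the bare ACK
 *	...
 *	if (skb_is_tcp_pure_ack(skb))
 *		;			// e.g. special-case queueing
 */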
static inline int tcp_inq(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		answ = 0;
	} else if (sock_flag(sk, SOCK_URGINLINE) ||
		   !tp->urg_data ||
		   before(tp->urg_seq, tp->copied_seq) ||
		   !before(tp->urg_seq, tp->rcv_nxt)) {
		answ = tp->rcv_nxt - tp->copied_seq;

		/* Subtract 1, if FIN was received */
		if (answ && sock_flag(sk, SOCK_DONE))
			answ--;
	} else {
		answ = tp->urg_seq - tp->copied_seq;
	}

	return answ;
}
int tcp_peek_len(struct socket *sock);
static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
{
	u16 segs_in;

	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	tp->segs_in += segs_in;
	if (skb->len > tcp_hdrlen(skb))
		tp->data_segs_in += segs_in;
}
/*
 * TCP listen path runs lockless.
 * We forced "struct sock" to be const qualified to make sure
 * we don't modify one of its fields by mistake.
 * Here, we increment sk_drops which is an atomic_t, so we can safely
 * make sock writable again.
 */
static inline void tcp_listendrop(const struct sock *sk)
{
	atomic_inc(&((struct sock *)sk)->sk_drops);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
}
enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);

/*
 * Interface for adding Upper Level Protocols over TCP
 */

#define TCP_ULP_NAME_MAX	16
#define TCP_ULP_MAX		128
#define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)
struct tcp_ulp_ops {
	struct list_head	list;

	/* initialize ulp */
	int (*init)(struct sock *sk);
	/* update ulp */
	void (*update)(struct sock *sk, struct proto *p,
		       void (*write_space)(struct sock *sk));
	/* cleanup ulp */
	void (*release)(struct sock *sk);
	/* diagnostic */
	int (*get_info)(const struct sock *sk, struct sk_buff *skb);
	size_t (*get_info_size)(const struct sock *sk);
	/* clone ulp */
	void (*clone)(const struct request_sock *req, struct sock *newsk,
		      const gfp_t priority);

	char		name[TCP_ULP_NAME_MAX];
	struct module	*owner;
};
int tcp_register_ulp(struct tcp_ulp_ops *type);
void tcp_unregister_ulp(struct tcp_ulp_ops *type);
int tcp_set_ulp(struct sock *sk, const char *name);
void tcp_get_available_ulp(char *buf, size_t len);
void tcp_cleanup_ulp(struct sock *sk);
void tcp_update_ulp(struct sock *sk, struct proto *p,
		    void (*write_space)(struct sock *sk));

#define MODULE_ALIAS_TCP_ULP(name)				\
	__MODULE_INFO(alias, alias_userspace, name);		\
	__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
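/*
 * Editor's note: a minimal, illustrative ULP skeleton (all names made
 * up; kTLS in net/tls is the in-tree reference user). A module registers
 * its ops once, and applications then select it per socket with
 * setsockopt(fd, SOL_TCP, TCP_ULP, "my_ulp", 7):
 *
 *	static int my_ulp_init(struct sock *sk)
 *	{
 *		return 0;	// typically swaps in a wrapped sk->sk_prot
 *	}
 *
 *	static struct tcp_ulp_ops my_ulp_ops = {
 *		.name	= "my_ulp",
 *		.owner	= THIS_MODULE,
 *		.init	= my_ulp_init,
 *	};
 *
 *	// in module_init(): tcp_register_ulp(&my_ulp_ops);
 *	// in module_exit(): tcp_unregister_ulp(&my_ulp_ops);
 */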
#ifdef CONFIG_NET_SOCK_MSG
struct sk_msg;
struct sk_psock;

#ifdef CONFIG_BPF_SYSCALL
struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
#endif /* CONFIG_BPF_SYSCALL */

int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes,
			  int flags);
#endif /* CONFIG_NET_SOCK_MSG */
#if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG)
static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
{
}
#endif
#ifdef CONFIG_CGROUP_BPF
static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
				      struct sk_buff *skb,
				      unsigned int end_offset)
{
	skops->skb = skb;
	skops->skb_data_end = skb->data + end_offset;
}
#else
static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
				      struct sk_buff *skb,
				      unsigned int end_offset)
{
}
#endif
/* Call BPF_SOCK_OPS program that returns an int. If the return value
 * is < 0, then the BPF op failed (for example if the loaded BPF
 * program does not support the chosen operation or there is no BPF
 * program loaded).
 */
#ifdef CONFIG_BPF
static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
{
	struct bpf_sock_ops_kern sock_ops;
	int ret;

	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
	if (sk_fullsock(sk)) {
		sock_ops.is_fullsock = 1;
		sock_owned_by_me(sk);
	}

	sock_ops.sk = sk;
	sock_ops.op = op;
	if (nargs > 0)
		memcpy(sock_ops.args, args, nargs * sizeof(*args));

	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
	if (ret == 0)
		ret = sock_ops.reply;
	else
		ret = -1;
	return ret;
}
static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
{
	u32 args[2] = {arg1, arg2};

	return tcp_call_bpf(sk, op, 2, args);
}

static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
				    u32 arg3)
{
	u32 args[3] = {arg1, arg2, arg3};

	return tcp_call_bpf(sk, op, 3, args);
}
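/*
 * Editor's note: illustrative sketch of the calling convention; a
 * negative result means "no/unsupported BPF program", so callers fall
 * back to a built-in default (the same pattern the helpers below use):
 *
 *	int base_rtt = tcp_call_bpf(sk, BPF_SOCK_OPS_BASE_RTT, 0, NULL);
 *
 *	if (base_rtt < 0)
 *		base_rtt = default_base_rtt;	// hypothetical fallback
 */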
#else
static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
{
	return -EPERM;
}

static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
{
	return -EPERM;
}

static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
				    u32 arg3)
{
	return -EPERM;
}
#endif
static inline u32 tcp_timeout_init(struct sock *sk)
{
	int timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);

	if (timeout <= 0)
		timeout = TCP_TIMEOUT_INIT;
	return timeout;
}

static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
{
	int rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);

	if (rwnd < 0)
		rwnd = 0;
	return rwnd;
}
static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
{
	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
}

static inline void tcp_bpf_rtt(struct sock *sk)
{
	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
		tcp_call_bpf(sk, BPF_SOCK_OPS_RTT_CB, 0, NULL);
}
#if IS_ENABLED(CONFIG_SMC)
extern struct static_key_false tcp_have_smc;
#endif
#if IS_ENABLED(CONFIG_TLS_DEVICE)
void clean_acked_data_enable(struct inet_connection_sock *icsk,
			     void (*cad)(struct sock *sk, u32 ack_seq));
void clean_acked_data_disable(struct inet_connection_sock *icsk);
void clean_acked_data_flush(void);
#endif
DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
static inline void tcp_add_tx_delay(struct sk_buff *skb,
				    const struct tcp_sock *tp)
{
	if (static_branch_unlikely(&tcp_tx_delay_enabled))
		skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
}
/* Compute Earliest Departure Time for some control packets
 * like ACK or RST for TIME_WAIT or non-ESTABLISHED sockets.
 */
static inline u64 tcp_transmit_time(const struct sock *sk)
{
	if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
		u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
			    tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;

		return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
	}
	return 0;
}
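/*
 * Editor's note: a worked example under assumed numbers. With
 * tcp_tx_delay set to 500 us, an ACK sent from a TIME_WAIT socket gets
 * an Earliest Departure Time of tcp_clock_ns() + 500 * NSEC_PER_USEC,
 * i.e. a pacing-aware qdisc such as sch_fq holds it for 0.5 ms before
 * it hits the wire; a return value of 0 means "send immediately".
 */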