net/ipv4/tcp_yeah.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 *   YeAH TCP
 *
 * For further details look at:
 *   https://web.archive.org/web/20080316215752/http://wil.cs.caltech.edu/pfldnet2007/paper/YeAH_TCP.pdf
 *
 */
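
/*
 * Overview: YeAH ("Yet Another Highspeed") TCP alternates between two
 * modes.  In "fast" mode cwnd grows aggressively (Scalable-TCP style)
 * while the estimated bottleneck backlog stays small; in "reno" (slow)
 * mode cwnd grows like Reno and part of the backlog is drained
 * ("precautionary decongestion").  The backlog estimate reuses the
 * Vegas RTT-sampling machinery, which is why struct vegas is embedded
 * in struct yeah and several hooks point at the tcp_vegas_* helpers.
 */
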
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>

#include <net/tcp.h>

#include "tcp_vegas.h"

#define TCP_YEAH_ALPHA       80 /* number of packets queued at the bottleneck */
#define TCP_YEAH_GAMMA        1 /* fraction of queue to be removed per rtt */
#define TCP_YEAH_DELTA        3 /* log minimum fraction of cwnd to be removed on loss */
#define TCP_YEAH_EPSILON      1 /* log maximum fraction to be removed on early decongestion */
#define TCP_YEAH_PHY          8 /* maximum rtt drift above baseRTT, as baseRTT/PHY */
#define TCP_YEAH_RHO         16 /* minimum number of consecutive rtt to consider competition on loss */
#define TCP_YEAH_ZETA        50 /* number of consecutive "fast" rtts needed to reset reno_count */

#define TCP_SCALABLE_AI_CNT      100U

/* YeAH variables */
struct yeah {
        struct vegas vegas;     /* must be first */

        /* YeAH */
        u32 lastQ;              /* backlog (pkts) estimated over the last RTT */
        u32 doing_reno_now;     /* consecutive RTTs spent in "reno" mode */

        u32 reno_count;         /* estimate of a competing Reno flow's cwnd */
        u32 fast_count;         /* consecutive RTTs without congestion signs */
};

static void tcp_yeah_init(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct yeah *yeah = inet_csk_ca(sk);

        tcp_vegas_init(sk);

        yeah->doing_reno_now = 0;
        yeah->lastQ = 0;

        yeah->reno_count = 2;

        /* Ensure the MD arithmetic works.  This is somewhat pedantic,
         * since I don't think we will see a cwnd this large. :) */
        tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
}

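/* Per-ACK cwnd growth.  In "fast" mode cwnd is increased by one segment
 * for every min(cwnd, TCP_SCALABLE_AI_CNT) packets ACKed (Scalable TCP
 * style); while doing precautionary decongestion it grows Reno-style
 * instead.  Once per RTT the Vegas-style backlog estimate below decides
 * which mode the next RTT uses.
 */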
static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct yeah *yeah = inet_csk_ca(sk);

        if (!tcp_is_cwnd_limited(sk))
                return;

        if (tcp_in_slow_start(tp)) {
                acked = tcp_slow_start(tp, acked);
                if (!acked)
                        goto do_vegas;
        }

        if (!yeah->doing_reno_now) {
                /* Scalable */
                tcp_cong_avoid_ai(tp, min(tcp_snd_cwnd(tp), TCP_SCALABLE_AI_CNT),
                                  acked);
        } else {
                /* Reno */
                tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked);
        }

        /* The key players are vegas.beg_snd_una and vegas.beg_snd_nxt.
         *
         * These are so named because they represent the approximate values
         * of snd_una and snd_nxt at the beginning of the current RTT. More
         * precisely, their difference represents the amount of data sent
         * during the RTT. At the end of the RTT, when we receive an ACK for
         * vegas.beg_snd_nxt, we will calculate that
         * (vegas.beg_snd_nxt - vegas.beg_snd_una) outstanding bytes of data
         * have been ACKed during the course of the RTT, giving an "actual"
         * rate of:
         *
         *     (vegas.beg_snd_nxt - vegas.beg_snd_una) / (rtt duration)
         *
         * Unfortunately, vegas.beg_snd_una is not exactly equal to snd_una,
         * because delayed ACKs can cover more than one segment, so they
         * don't line up nicely with the boundaries of RTTs.
         *
         * Another unfortunate fact of life is that delayed ACKs delay the
         * advance of the left edge of our send window, so that the number
         * of bytes we send in an RTT is often less than our cwnd will allow.
         * So we keep track of our cwnd separately, in vegas.beg_snd_cwnd.
         */
do_vegas:
        if (after(ack, yeah->vegas.beg_snd_nxt)) {
                /* We do the Vegas calculations only if we got enough RTT
                 * samples that we can be reasonably sure that we got
                 * at least one RTT sample that wasn't from a delayed ACK.
                 * If we only had 2 samples total,
                 * then that means we're getting only 1 ACK per RTT, which
                 * means they're almost certainly delayed ACKs.
                 * If we have 3 samples, we should be OK.
                 */

                if (yeah->vegas.cntRTT > 2) {
                        u32 rtt, queue;
                        u64 bw;

                        /* We have enough RTT samples, so, using the Vegas
                         * algorithm, we determine if we should increase or
                         * decrease cwnd, and by how much.
                         */

                        /* Pluck out the RTT we are using for the Vegas
                         * calculations. This is the min RTT seen during the
                         * last RTT. Taking the min filters out the effects
                         * of delayed ACKs, at the cost of noticing congestion
                         * a bit later.
                         */
                        rtt = yeah->vegas.minRTT;

                        /* Compute excess number of packets above bandwidth.
                         * Avoid doing full 64 bit divide.
                         */
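                        /* queue = cwnd * (rtt - baseRTT) / rtt, i.e. the
                         * number of this flow's packets estimated to be
                         * sitting in the bottleneck buffer.  E.g. with
                         * cwnd = 100, baseRTT = 100ms and rtt = 125ms:
                         *   queue = 100 * 25 / 125 = 20 packets backlogged.
                         */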
                        bw = tcp_snd_cwnd(tp);
                        bw *= rtt - yeah->vegas.baseRTT;
                        do_div(bw, rtt);
                        queue = bw;

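                        /* Congestion is assumed once the backlog exceeds
                         * TCP_YEAH_ALPHA packets or the RTT has drifted more
                         * than baseRTT/TCP_YEAH_PHY above baseRTT: drain part
                         * of the queue ("precautionary decongestion") and
                         * count this RTT as a "reno" one.  Otherwise stay in
                         * fast mode; after TCP_YEAH_ZETA consecutive fast
                         * RTTs, reset reno_count.
                         */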
                        if (queue > TCP_YEAH_ALPHA ||
                            rtt - yeah->vegas.baseRTT > (yeah->vegas.baseRTT / TCP_YEAH_PHY)) {
                                if (queue > TCP_YEAH_ALPHA &&
                                    tcp_snd_cwnd(tp) > yeah->reno_count) {
                                        u32 reduction = min(queue / TCP_YEAH_GAMMA,
                                                            tcp_snd_cwnd(tp) >> TCP_YEAH_EPSILON);

                                        tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - reduction);

                                        tcp_snd_cwnd_set(tp, max(tcp_snd_cwnd(tp),
                                                                 yeah->reno_count));

                                        tp->snd_ssthresh = tcp_snd_cwnd(tp);
                                }

                                if (yeah->reno_count <= 2)
                                        yeah->reno_count = max(tcp_snd_cwnd(tp)>>1, 2U);
                                else
                                        yeah->reno_count++;

                                yeah->doing_reno_now = min(yeah->doing_reno_now + 1,
                                                           0xffffffU);
                        } else {
                                yeah->fast_count++;

                                if (yeah->fast_count > TCP_YEAH_ZETA) {
                                        yeah->reno_count = 2;
                                        yeah->fast_count = 0;
                                }

                                yeah->doing_reno_now = 0;
                        }

                        yeah->lastQ = queue;
                }

                /* Save the extent of the current window so we can use this
                 * at the end of the next RTT.
                 */
                yeah->vegas.beg_snd_una  = yeah->vegas.beg_snd_nxt;
                yeah->vegas.beg_snd_nxt  = tp->snd_nxt;
                yeah->vegas.beg_snd_cwnd = tcp_snd_cwnd(tp);

                /* Wipe the slate clean for the next RTT. */
                yeah->vegas.cntRTT = 0;
                yeah->vegas.minRTT = 0x7fffffff;
        }
}

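/* Loss response: if we have spent fewer than TCP_YEAH_RHO consecutive
 * RTTs in "reno" mode, assume we are not competing with Reno flows and
 * give back only the estimated backlog, clamped between
 * cwnd >> TCP_YEAH_DELTA and half of cwnd; otherwise halve like Reno.
 */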
static u32 tcp_yeah_ssthresh(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct yeah *yeah = inet_csk_ca(sk);
        u32 reduction;

        if (yeah->doing_reno_now < TCP_YEAH_RHO) {
                reduction = yeah->lastQ;

                reduction = min(reduction, max(tcp_snd_cwnd(tp)>>1, 2U));

                reduction = max(reduction, tcp_snd_cwnd(tp) >> TCP_YEAH_DELTA);
        } else {
                reduction = max(tcp_snd_cwnd(tp)>>1, 2U);
        }

        yeah->fast_count = 0;
        yeah->reno_count = max(yeah->reno_count>>1, 2U);

        return max_t(int, tcp_snd_cwnd(tp) - reduction, 2);
}

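/* Vegas supplies the RTT sampling, state tracking and diagnostics that
 * the backlog estimate depends on, so those hooks reuse tcp_vegas_*.
 */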
static struct tcp_congestion_ops tcp_yeah __read_mostly = {
        .init           = tcp_yeah_init,
        .ssthresh       = tcp_yeah_ssthresh,
        .undo_cwnd      = tcp_reno_undo_cwnd,
        .cong_avoid     = tcp_yeah_cong_avoid,
        .set_state      = tcp_vegas_state,
        .cwnd_event     = tcp_vegas_cwnd_event,
        .get_info       = tcp_vegas_get_info,
        .pkts_acked     = tcp_vegas_pkts_acked,

        .owner          = THIS_MODULE,
        .name           = "yeah",
};

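/* struct yeah lives in the socket's congestion-control private area and
 * therefore must not outgrow ICSK_CA_PRIV_SIZE; BUILD_BUG_ON enforces it.
 */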
static int __init tcp_yeah_register(void)
{
        BUILD_BUG_ON(sizeof(struct yeah) > ICSK_CA_PRIV_SIZE);
        return tcp_register_congestion_control(&tcp_yeah);
}

static void __exit tcp_yeah_unregister(void)
{
        tcp_unregister_congestion_control(&tcp_yeah);
}

module_init(tcp_yeah_register);
module_exit(tcp_yeah_unregister);

MODULE_AUTHOR("Angelo P. Castellani");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("YeAH TCP");