tcp_metrics: annotate data-races around tm->tcpm_vals[]
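
tm->tcpm_vals[] can be read and written locklessly: lookups run under
rcu_read_lock() while updaters may store new values without taking
tcp_metrics_lock. Funnel all accesses through tcp_metric_get() and
tcp_metric_set() and annotate them with READ_ONCE()/WRITE_ONCE() to
document the races and prevent load/store tearing.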
// SPDX-License-Identifier: GPL-2.0
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
                                                   const struct inetpeer_addr *daddr,
                                                   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
        u16     mss;
        u16     syn_loss:10,            /* Recurring Fast Open SYN losses */
                try_exp:2;              /* Request w/ exp. option (once) */
        unsigned long   last_syn_loss;  /* Last Fast Open SYN loss */
        struct  tcp_fastopen_cookie     cookie;
};

/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility
 * Kernel only stores RTT and RTTVAR in usec resolution
 */
#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)

struct tcp_metrics_block {
        struct tcp_metrics_block __rcu  *tcpm_next;
        possible_net_t                  tcpm_net;
        struct inetpeer_addr            tcpm_saddr;
        struct inetpeer_addr            tcpm_daddr;
        unsigned long                   tcpm_stamp;
        u32                             tcpm_lock;
        u32                             tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
        struct tcp_fastopen_metrics     tcpm_fastopen;

        struct rcu_head                 rcu_head;
};

static inline struct net *tm_net(struct tcp_metrics_block *tm)
{
        return read_pnet(&tm->tcpm_net);
}

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
                              enum tcp_metric_index idx)
{
        /* Paired with WRITE_ONCE() in tcpm_suck_dst() */
        return READ_ONCE(tm->tcpm_lock) & (1 << idx);
}

static u32 tcp_metric_get(const struct tcp_metrics_block *tm,
                          enum tcp_metric_index idx)
{
        /* Paired with WRITE_ONCE() in tcp_metric_set() */
        return READ_ONCE(tm->tcpm_vals[idx]);
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
                           enum tcp_metric_index idx,
                           u32 val)
{
        /* Paired with READ_ONCE() in tcp_metric_get() */
        WRITE_ONCE(tm->tcpm_vals[idx], val);
}
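
/* tcpm_vals[] may be read under rcu_read_lock() by paths that do not hold
 * tcp_metrics_lock, while writers update it concurrently.  All array
 * accesses therefore funnel through tcp_metric_get()/tcp_metric_set()
 * above, so the READ_ONCE()/WRITE_ONCE() annotations live in one place
 * and individual loads/stores cannot be torn.
 */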

static bool addr_same(const struct inetpeer_addr *a,
                      const struct inetpeer_addr *b)
{
        return (a->family == b->family) && !inetpeer_addr_cmp(a, b);
}

struct tcpm_hash_bucket {
        struct tcp_metrics_block __rcu  *chain;
};

static struct tcpm_hash_bucket  *tcp_metrics_hash __read_mostly;
static unsigned int             tcp_metrics_hash_log __read_mostly;

static DEFINE_SPINLOCK(tcp_metrics_lock);
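
/* The spinlock above serializes all writers (entry creation, reclaim and
 * flush); lookups run locklessly under RCU, and the fastopen state is
 * additionally guarded by fastopen_seqlock further below.
 */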

static void tcpm_suck_dst(struct tcp_metrics_block *tm,
                          const struct dst_entry *dst,
                          bool fastopen_clear)
{
        u32 msval;
        u32 val;

        WRITE_ONCE(tm->tcpm_stamp, jiffies);

        val = 0;
        if (dst_metric_locked(dst, RTAX_RTT))
                val |= 1 << TCP_METRIC_RTT;
        if (dst_metric_locked(dst, RTAX_RTTVAR))
                val |= 1 << TCP_METRIC_RTTVAR;
        if (dst_metric_locked(dst, RTAX_SSTHRESH))
                val |= 1 << TCP_METRIC_SSTHRESH;
        if (dst_metric_locked(dst, RTAX_CWND))
                val |= 1 << TCP_METRIC_CWND;
        if (dst_metric_locked(dst, RTAX_REORDERING))
                val |= 1 << TCP_METRIC_REORDERING;
        /* Paired with READ_ONCE() in tcp_metric_locked() */
        WRITE_ONCE(tm->tcpm_lock, val);

        msval = dst_metric_raw(dst, RTAX_RTT);
        tcp_metric_set(tm, TCP_METRIC_RTT, msval * USEC_PER_MSEC);

        msval = dst_metric_raw(dst, RTAX_RTTVAR);
        tcp_metric_set(tm, TCP_METRIC_RTTVAR, msval * USEC_PER_MSEC);
        tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                       dst_metric_raw(dst, RTAX_SSTHRESH));
        tcp_metric_set(tm, TCP_METRIC_CWND,
                       dst_metric_raw(dst, RTAX_CWND));
        tcp_metric_set(tm, TCP_METRIC_REORDERING,
                       dst_metric_raw(dst, RTAX_REORDERING));
        if (fastopen_clear) {
                tm->tcpm_fastopen.mss = 0;
                tm->tcpm_fastopen.syn_loss = 0;
                tm->tcpm_fastopen.try_exp = 0;
                tm->tcpm_fastopen.cookie.exp = false;
                tm->tcpm_fastopen.cookie.len = 0;
        }
}
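
/* Note: tcpm_suck_dst() is not always called with tcp_metrics_lock held;
 * tcpm_check_stamp() below invokes it from RCU-protected lookup paths when
 * an entry has gone stale, which is why the stamp, lock bitmap and value
 * updates above all go through the annotated WRITE_ONCE()-based helpers.
 */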

#define TCP_METRICS_TIMEOUT             (60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm,
                             const struct dst_entry *dst)
{
        unsigned long limit;

        if (!tm)
                return;
        limit = READ_ONCE(tm->tcpm_stamp) + TCP_METRICS_TIMEOUT;
        if (unlikely(time_after(jiffies, limit)))
                tcpm_suck_dst(tm, dst, false);
}

#define TCP_METRICS_RECLAIM_DEPTH       5
#define TCP_METRICS_RECLAIM_PTR         (struct tcp_metrics_block *) 0x1UL
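
/* A lookup that walks more than TCP_METRICS_RECLAIM_DEPTH entries without
 * a match returns TCP_METRICS_RECLAIM_PTR instead of NULL; tcpm_new() then
 * recycles the oldest block in the bucket rather than growing the chain.
 */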

#define deref_locked(p) \
        rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))

static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
                                          struct inetpeer_addr *saddr,
                                          struct inetpeer_addr *daddr,
                                          unsigned int hash)
{
        struct tcp_metrics_block *tm;
        struct net *net;
        bool reclaim = false;

        spin_lock_bh(&tcp_metrics_lock);
        net = dev_net(dst->dev);

        /* While waiting for the spin-lock the cache might have been populated
         * with this entry and so we have to check again.
         */
        tm = __tcp_get_metrics(saddr, daddr, net, hash);
        if (tm == TCP_METRICS_RECLAIM_PTR) {
                reclaim = true;
                tm = NULL;
        }
        if (tm) {
                tcpm_check_stamp(tm, dst);
                goto out_unlock;
        }

        if (unlikely(reclaim)) {
                struct tcp_metrics_block *oldest;

                oldest = deref_locked(tcp_metrics_hash[hash].chain);
                for (tm = deref_locked(oldest->tcpm_next); tm;
                     tm = deref_locked(tm->tcpm_next)) {
                        if (time_before(READ_ONCE(tm->tcpm_stamp),
                                        READ_ONCE(oldest->tcpm_stamp)))
                                oldest = tm;
                }
                tm = oldest;
        } else {
                tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
                if (!tm)
                        goto out_unlock;
        }
        write_pnet(&tm->tcpm_net, net);
        tm->tcpm_saddr = *saddr;
        tm->tcpm_daddr = *daddr;

        tcpm_suck_dst(tm, dst, true);

        if (likely(!reclaim)) {
                tm->tcpm_next = tcp_metrics_hash[hash].chain;
                rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
        }

out_unlock:
        spin_unlock_bh(&tcp_metrics_lock);
        return tm;
}

static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
        if (tm)
                return tm;
        if (depth > TCP_METRICS_RECLAIM_DEPTH)
                return TCP_METRICS_RECLAIM_PTR;
        return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
                                                   const struct inetpeer_addr *daddr,
                                                   struct net *net, unsigned int hash)
{
        struct tcp_metrics_block *tm;
        int depth = 0;

        for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_saddr, saddr) &&
                    addr_same(&tm->tcpm_daddr, daddr) &&
                    net_eq(tm_net(tm), net))
                        break;
                depth++;
        }
        return tcp_get_encode(tm, depth);
}

static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
                                                       struct dst_entry *dst)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr saddr, daddr;
        unsigned int hash;
        struct net *net;

        saddr.family = req->rsk_ops->family;
        daddr.family = req->rsk_ops->family;
        switch (daddr.family) {
        case AF_INET:
                inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
                inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
                hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
                inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
                hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
                break;
#endif
        default:
                return NULL;
        }

        net = dev_net(dst->dev);
        hash ^= net_hash_mix(net);
        hash = hash_32(hash, tcp_metrics_hash_log);

        for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_saddr, &saddr) &&
                    addr_same(&tm->tcpm_daddr, &daddr) &&
                    net_eq(tm_net(tm), net))
                        break;
        }
        tcpm_check_stamp(tm, dst);
        return tm;
}

static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
                                                 struct dst_entry *dst,
                                                 bool create)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr saddr, daddr;
        unsigned int hash;
        struct net *net;

        if (sk->sk_family == AF_INET) {
                inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
                inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
                hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
        }
#if IS_ENABLED(CONFIG_IPV6)
        else if (sk->sk_family == AF_INET6) {
                if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
                        inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
                        inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
                        hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
                } else {
                        inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
                        inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
                        hash = ipv6_addr_hash(&sk->sk_v6_daddr);
                }
        }
#endif
        else
                return NULL;

        net = dev_net(dst->dev);
        hash ^= net_hash_mix(net);
        hash = hash_32(hash, tcp_metrics_hash_log);

        tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
        if (tm == TCP_METRICS_RECLAIM_PTR)
                tm = NULL;
        if (!tm && create)
                tm = tcpm_new(dst, &saddr, &daddr, hash);
        else
                tcpm_check_stamp(tm, dst);

        return tm;
}

/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
        struct tcp_metrics_block *tm;
        unsigned long rtt;
        u32 val;
        int m;

        sk_dst_confirm(sk);
        if (READ_ONCE(net->ipv4.sysctl_tcp_nometrics_save) || !dst)
                return;

        rcu_read_lock();
        if (icsk->icsk_backoff || !tp->srtt_us) {
                /* This session failed to estimate rtt. Why?
                 * Probably, no packets returned in time.  Reset our
                 * results.
                 */
                tm = tcp_get_metrics(sk, dst, false);
                if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
                        tcp_metric_set(tm, TCP_METRIC_RTT, 0);
                goto out_unlock;
        } else
                tm = tcp_get_metrics(sk, dst, true);

        if (!tm)
                goto out_unlock;

        rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
        m = rtt - tp->srtt_us;

        /* If the newly calculated rtt is larger than the stored one,
         * store the new one. Otherwise, use EWMA. Remember, rtt
         * overestimation is always better than underestimation.
         */
        if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
                if (m <= 0)
                        rtt = tp->srtt_us;
                else
                        rtt -= (m >> 3);
                tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
        }
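
        /* Worked example (illustrative numbers): with a cached rtt of
         * 100000 usec and a session srtt of 60000 usec, m = 40000 and the
         * cache decays to 100000 - 40000/8 = 95000 usec; a session srtt
         * above the cached value replaces it outright via the m <= 0 path.
         */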

        if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
                unsigned long var;

                if (m < 0)
                        m = -m;

                /* Scale deviation to rttvar fixed point (srtt is scaled
                 * by 8, mdev by 4, hence the halving).
                 */
                m >>= 1;
                if (m < tp->mdev_us)
                        m = tp->mdev_us;

                var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
                if (m >= var)
                        var = m;
                else
                        var -= (var - m) >> 2;

                tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
        }

        if (tcp_in_initial_slowstart(tp)) {
                /* Slow start has not finished yet. */
                if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
                    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && (tcp_snd_cwnd(tp) >> 1) > val)
                                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                               tcp_snd_cwnd(tp) >> 1);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        if (tcp_snd_cwnd(tp) > val)
                                tcp_metric_set(tm, TCP_METRIC_CWND,
                                               tcp_snd_cwnd(tp));
                }
        } else if (!tcp_in_slow_start(tp) &&
                   icsk->icsk_ca_state == TCP_CA_Open) {
                /* Cong. avoidance phase, cwnd is reliable. */
                if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
                    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
                        tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                       max(tcp_snd_cwnd(tp) >> 1, tp->snd_ssthresh));
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        tcp_metric_set(tm, TCP_METRIC_CWND, (val + tcp_snd_cwnd(tp)) >> 1);
                }
        } else {
                /* Else slow start did not finish, cwnd is nonsense and
                 * ssthresh may also be invalid.
                 */
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        tcp_metric_set(tm, TCP_METRIC_CWND,
                                       (val + tp->snd_ssthresh) >> 1);
                }
                if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
                    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && tp->snd_ssthresh > val)
                                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                               tp->snd_ssthresh);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
                        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
                        if (val < tp->reordering &&
                            tp->reordering !=
                            READ_ONCE(net->ipv4.sysctl_tcp_reordering))
                                tcp_metric_set(tm, TCP_METRIC_REORDERING,
                                               tp->reordering);
                }
        }
        WRITE_ONCE(tm->tcpm_stamp, jiffies);
out_unlock:
        rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
        struct tcp_metrics_block *tm;
        u32 val, crtt = 0; /* cached RTT scaled by 8 */

        sk_dst_confirm(sk);
        if (!dst)
                goto reset;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (!tm) {
                rcu_read_unlock();
                goto reset;
        }

        if (tcp_metric_locked(tm, TCP_METRIC_CWND))
                tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

        val = READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) ?
              0 : tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
        if (val) {
                tp->snd_ssthresh = val;
                if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
                        tp->snd_ssthresh = tp->snd_cwnd_clamp;
        } else {
                /* ssthresh may have been reduced unnecessarily during
                 * the 3WHS. Restore it back to its initial default.
                 */
                tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        }
        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
        if (val && tp->reordering != val)
                tp->reordering = val;

        crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
        rcu_read_unlock();
reset:
        /* The initial RTT measurement from the SYN/SYN-ACK is not ideal
         * to seed the RTO for later data packets because SYN packets are
         * small. Use the per-dst cached values to seed the RTO but keep
         * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
         * Later the RTO will be updated immediately upon obtaining the first
         * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
         * influences the first RTO but not later RTT estimation.
         *
         * But if RTT is not available from the SYN (due to retransmits or
         * syn cookies) or the cache, force a conservative 3secs timeout.
         *
         * A bit of theory. RTT is the time that passes from when a "normal"
         * sized packet is sent until it is ACKed. In normal circumstances
         * sending small packets forces the peer to delay ACKs, so the
         * calculation is correct there too. The algorithm is adaptive and,
         * provided we follow the specs, it NEVER underestimates RTT. BUT!
         * If the peer plays clever tricks, sending "quick acks" long enough
         * to drive the RTT down to a low value and then abruptly switching
         * to delayed ACKs, expect trouble.
         */
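
        /* Unit note: crtt, like tp->srtt_us, is a usec value scaled by 8,
         * so dividing by (8 * USEC_PER_SEC / HZ) converts it to jiffies.
         * E.g. with HZ=1000, a cached 100 ms RTT stored as 800000 becomes
         * 100 jiffies, and the RTO seeds to roughly 3 * RTT
         * (rtt + max(2 * rtt, tcp_rto_min())).
         */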
        if (crtt > tp->srtt_us) {
                /* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
                crtt /= 8 * USEC_PER_SEC / HZ;
                inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
        } else if (tp->srtt_us == 0) {
                /* RFC6298: 5.7 We've failed to get a valid RTT sample from
                 * the 3WHS. This is most likely due to retransmission,
                 * including spurious ones. Reset the RTO back to 3secs
                 * from the more aggressive 1sec to avoid more spurious
                 * retransmission.
                 */
                tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
                tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;

                inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
        }
}

bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
{
        struct tcp_metrics_block *tm;
        bool ret;

        if (!dst)
                return false;

        rcu_read_lock();
        tm = __tcp_get_metrics_req(req, dst);
        if (tm && tcp_metric_get(tm, TCP_METRIC_RTT))
                ret = true;
        else
                ret = false;
        rcu_read_unlock();

        return ret;
}

static DEFINE_SEQLOCK(fastopen_seqlock);
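
/* The fastopen fields are multi-word (mss, loss counters, cookie), so a
 * seqlock lets lockless readers retry until they see a consistent snapshot
 * while keeping writers cheap; the RCU protection above only guarantees
 * that the tcp_metrics_block itself stays alive.
 */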

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
                            struct tcp_fastopen_cookie *cookie)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
        if (tm) {
                struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
                unsigned int seq;

                do {
                        seq = read_seqbegin(&fastopen_seqlock);
                        if (tfom->mss)
                                *mss = tfom->mss;
                        *cookie = tfom->cookie;
                        if (cookie->len <= 0 && tfom->try_exp == 1)
                                cookie->exp = true;
                } while (read_seqretry(&fastopen_seqlock, seq));
        }
        rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
                            struct tcp_fastopen_cookie *cookie, bool syn_lost,
                            u16 try_exp)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_metrics_block *tm;

        if (!dst)
                return;
        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (tm) {
                struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

                write_seqlock_bh(&fastopen_seqlock);
                if (mss)
                        tfom->mss = mss;
                if (cookie && cookie->len > 0)
                        tfom->cookie = *cookie;
                else if (try_exp > tfom->try_exp &&
                         tfom->cookie.len <= 0 && !tfom->cookie.exp)
                        tfom->try_exp = try_exp;
                if (syn_lost) {
                        ++tfom->syn_loss;
                        tfom->last_syn_loss = jiffies;
                } else
                        tfom->syn_loss = 0;
                write_sequnlock_bh(&fastopen_seqlock);
        }
        rcu_read_unlock();
}

static struct genl_family tcp_metrics_nl_family;

static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
        [TCP_METRICS_ATTR_ADDR_IPV4]    = { .type = NLA_U32, },
        [TCP_METRICS_ATTR_ADDR_IPV6]    = { .type = NLA_BINARY,
                                            .len = sizeof(struct in6_addr), },
        /* The following attributes are not received for GET/DEL;
         * we keep them for reference.
         */
#if 0
        [TCP_METRICS_ATTR_AGE]          = { .type = NLA_MSECS, },
        [TCP_METRICS_ATTR_TW_TSVAL]     = { .type = NLA_U32, },
        [TCP_METRICS_ATTR_TW_TS_STAMP]  = { .type = NLA_S32, },
        [TCP_METRICS_ATTR_VALS]         = { .type = NLA_NESTED, },
        [TCP_METRICS_ATTR_FOPEN_MSS]    = { .type = NLA_U16, },
        [TCP_METRICS_ATTR_FOPEN_SYN_DROPS]      = { .type = NLA_U16, },
        [TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]    = { .type = NLA_MSECS, },
        [TCP_METRICS_ATTR_FOPEN_COOKIE] = { .type = NLA_BINARY,
                                            .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
                                 struct tcp_metrics_block *tm)
{
        struct nlattr *nest;
        int i;

        switch (tm->tcpm_daddr.family) {
        case AF_INET:
                if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
                                    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
                        goto nla_put_failure;
                if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
                                    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
                        goto nla_put_failure;
                break;
        case AF_INET6:
                if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
                                     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
                        goto nla_put_failure;
                if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
                                     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
                        goto nla_put_failure;
                break;
        default:
                return -EAFNOSUPPORT;
        }

        if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
                          jiffies - READ_ONCE(tm->tcpm_stamp),
                          TCP_METRICS_ATTR_PAD) < 0)
                goto nla_put_failure;

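        /* Nested TCP_METRICS_ATTR_VALS attributes use metric index + 1 as
         * the attribute type.  RTT and RTTVAR are stored in usec but
         * reported twice: once via the *_US types and once, divided down
         * to ms, under the legacy types for older userspace.
         */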
        {
                int n = 0;

                nest = nla_nest_start_noflag(msg, TCP_METRICS_ATTR_VALS);
                if (!nest)
                        goto nla_put_failure;
                for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
                        u32 val = tcp_metric_get(tm, i);

                        if (!val)
                                continue;
                        if (i == TCP_METRIC_RTT) {
                                if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
                                                val) < 0)
                                        goto nla_put_failure;
                                n++;
                                val = max(val / 1000, 1U);
                        }
                        if (i == TCP_METRIC_RTTVAR) {
                                if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
                                                val) < 0)
                                        goto nla_put_failure;
                                n++;
                                val = max(val / 1000, 1U);
                        }
                        if (nla_put_u32(msg, i + 1, val) < 0)
                                goto nla_put_failure;
                        n++;
                }
                if (n)
                        nla_nest_end(msg, nest);
                else
                        nla_nest_cancel(msg, nest);
        }

        {
                struct tcp_fastopen_metrics tfom_copy[1], *tfom;
                unsigned int seq;

                do {
                        seq = read_seqbegin(&fastopen_seqlock);
                        tfom_copy[0] = tm->tcpm_fastopen;
                } while (read_seqretry(&fastopen_seqlock, seq));

                tfom = tfom_copy;
                if (tfom->mss &&
                    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
                                tfom->mss) < 0)
                        goto nla_put_failure;
                if (tfom->syn_loss &&
                    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
                                tfom->syn_loss) < 0 ||
                     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
                                jiffies - tfom->last_syn_loss,
                                TCP_METRICS_ATTR_PAD) < 0))
                        goto nla_put_failure;
                if (tfom->cookie.len > 0 &&
                    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
                            tfom->cookie.len, tfom->cookie.val) < 0)
                        goto nla_put_failure;
        }

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
                                 struct netlink_callback *cb,
                                 struct tcp_metrics_block *tm)
{
        void *hdr;

        hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &tcp_metrics_nl_family, NLM_F_MULTI,
                          TCP_METRICS_CMD_GET);
        if (!hdr)
                return -EMSGSIZE;

        if (tcp_metrics_fill_info(skb, tm) < 0)
                goto nla_put_failure;

        genlmsg_end(skb, hdr);
        return 0;

nla_put_failure:
        genlmsg_cancel(skb, hdr);
        return -EMSGSIZE;
}

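/* Dumps are resumable: cb->args[0]/args[1] record the hash row and the
 * position within its chain, so a filled skb can be returned to userspace
 * and the walk picked up at the same spot on the next invocation.
 */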
static int tcp_metrics_nl_dump(struct sk_buff *skb,
                               struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        unsigned int max_rows = 1U << tcp_metrics_hash_log;
        unsigned int row, s_row = cb->args[0];
        int s_col = cb->args[1], col = s_col;

        for (row = s_row; row < max_rows; row++, s_col = 0) {
                struct tcp_metrics_block *tm;
                struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;

                rcu_read_lock();
                for (col = 0, tm = rcu_dereference(hb->chain); tm;
                     tm = rcu_dereference(tm->tcpm_next), col++) {
                        if (!net_eq(tm_net(tm), net))
                                continue;
                        if (col < s_col)
                                continue;
                        if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
                                rcu_read_unlock();
                                goto done;
                        }
                }
                rcu_read_unlock();
        }

done:
        cb->args[0] = row;
        cb->args[1] = col;
        return skb->len;
}

static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
                           unsigned int *hash, int optional, int v4, int v6)
{
        struct nlattr *a;

        a = info->attrs[v4];
        if (a) {
                inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
                if (hash)
                        *hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
                return 0;
        }
        a = info->attrs[v6];
        if (a) {
                struct in6_addr in6;

                if (nla_len(a) != sizeof(struct in6_addr))
                        return -EINVAL;
                in6 = nla_get_in6_addr(a);
                inetpeer_set_addr_v6(addr, &in6);
                if (hash)
                        *hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
                return 0;
        }
        return optional ? 1 : -EAFNOSUPPORT;
}

static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
                         unsigned int *hash, int optional)
{
        return __parse_nl_addr(info, addr, hash, optional,
                               TCP_METRICS_ATTR_ADDR_IPV4,
                               TCP_METRICS_ATTR_ADDR_IPV6);
}

static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
        return __parse_nl_addr(info, addr, NULL, 0,
                               TCP_METRICS_ATTR_SADDR_IPV4,
                               TCP_METRICS_ATTR_SADDR_IPV6);
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr saddr, daddr;
        unsigned int hash;
        struct sk_buff *msg;
        struct net *net = genl_info_net(info);
        void *reply;
        int ret;
        bool src = true;

        ret = parse_nl_addr(info, &daddr, &hash, 0);
        if (ret < 0)
                return ret;

        ret = parse_nl_saddr(info, &saddr);
        if (ret < 0)
                src = false;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
                                  info->genlhdr->cmd);
        if (!reply)
                goto nla_put_failure;

        hash ^= net_hash_mix(net);
        hash = hash_32(hash, tcp_metrics_hash_log);
        ret = -ESRCH;
        rcu_read_lock();
        for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_daddr, &daddr) &&
                    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
                    net_eq(tm_net(tm), net)) {
                        ret = tcp_metrics_fill_info(msg, tm);
                        break;
                }
        }
        rcu_read_unlock();
        if (ret < 0)
                goto out_free;

        genlmsg_end(msg, reply);
        return genlmsg_reply(msg, info);

nla_put_failure:
        ret = -EMSGSIZE;

out_free:
        nlmsg_free(msg);
        return ret;
}

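/* net == NULL is used by the pernet exit path: instead of matching a
 * specific netns, it flushes entries whose netns refcount has dropped
 * to zero.
 */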
static void tcp_metrics_flush_all(struct net *net)
{
        unsigned int max_rows = 1U << tcp_metrics_hash_log;
        struct tcpm_hash_bucket *hb = tcp_metrics_hash;
        struct tcp_metrics_block *tm;
        unsigned int row;

        for (row = 0; row < max_rows; row++, hb++) {
                struct tcp_metrics_block __rcu **pp;
                bool match;

                spin_lock_bh(&tcp_metrics_lock);
                pp = &hb->chain;
                for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
                        match = net ? net_eq(tm_net(tm), net) :
                                !refcount_read(&tm_net(tm)->ns.count);
                        if (match) {
                                *pp = tm->tcpm_next;
                                kfree_rcu(tm, rcu_head);
                        } else {
                                pp = &tm->tcpm_next;
                        }
                }
                spin_unlock_bh(&tcp_metrics_lock);
        }
}

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct tcpm_hash_bucket *hb;
        struct tcp_metrics_block *tm;
        struct tcp_metrics_block __rcu **pp;
        struct inetpeer_addr saddr, daddr;
        unsigned int hash;
        struct net *net = genl_info_net(info);
        int ret;
        bool src = true, found = false;

        ret = parse_nl_addr(info, &daddr, &hash, 1);
        if (ret < 0)
                return ret;
        if (ret > 0) {
                tcp_metrics_flush_all(net);
                return 0;
        }
        ret = parse_nl_saddr(info, &saddr);
        if (ret < 0)
                src = false;

        hash ^= net_hash_mix(net);
        hash = hash_32(hash, tcp_metrics_hash_log);
        hb = tcp_metrics_hash + hash;
        pp = &hb->chain;
        spin_lock_bh(&tcp_metrics_lock);
        for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
                if (addr_same(&tm->tcpm_daddr, &daddr) &&
                    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
                    net_eq(tm_net(tm), net)) {
                        *pp = tm->tcpm_next;
                        kfree_rcu(tm, rcu_head);
                        found = true;
                } else {
                        pp = &tm->tcpm_next;
                }
        }
        spin_unlock_bh(&tcp_metrics_lock);
        if (!found)
                return -ESRCH;
        return 0;
}

static const struct genl_small_ops tcp_metrics_nl_ops[] = {
        {
                .cmd = TCP_METRICS_CMD_GET,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = tcp_metrics_nl_cmd_get,
                .dumpit = tcp_metrics_nl_dump,
        },
        {
                .cmd = TCP_METRICS_CMD_DEL,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = tcp_metrics_nl_cmd_del,
                .flags = GENL_ADMIN_PERM,
        },
};

static struct genl_family tcp_metrics_nl_family __ro_after_init = {
        .hdrsize        = 0,
        .name           = TCP_METRICS_GENL_NAME,
        .version        = TCP_METRICS_GENL_VERSION,
        .maxattr        = TCP_METRICS_ATTR_MAX,
        .policy         = tcp_metrics_nl_policy,
        .netnsok        = true,
        .module         = THIS_MODULE,
        .small_ops      = tcp_metrics_nl_ops,
        .n_small_ops    = ARRAY_SIZE(tcp_metrics_nl_ops),
        .resv_start_op  = TCP_METRICS_CMD_DEL + 1,
};

static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
        ssize_t ret;

        if (!str)
                return 0;

        ret = kstrtouint(str, 0, &tcpmhash_entries);
        if (ret)
                return 0;

        return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

static int __net_init tcp_net_metrics_init(struct net *net)
{
        size_t size;
        unsigned int slots;

        if (!net_eq(net, &init_net))
                return 0;

        slots = tcpmhash_entries;
        if (!slots) {
                if (totalram_pages() >= 128 * 1024)
                        slots = 16 * 1024;
                else
                        slots = 8 * 1024;
        }
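
        /* Default sizing: with 4 KB pages, 128 * 1024 pages is 512 MB of
         * RAM, so larger machines get 16K buckets and smaller ones 8K,
         * overridable via the tcpmhash_entries= boot parameter.  The table
         * is allocated once for init_net and shared by all namespaces,
         * with net_hash_mix() keying entries per netns.
         */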

        tcp_metrics_hash_log = order_base_2(slots);
        size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;

        tcp_metrics_hash = kvzalloc(size, GFP_KERNEL);
        if (!tcp_metrics_hash)
                return -ENOMEM;

        return 0;
}

static void __net_exit tcp_net_metrics_exit_batch(struct list_head *net_exit_list)
{
        tcp_metrics_flush_all(NULL);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
        .init           =       tcp_net_metrics_init,
        .exit_batch     =       tcp_net_metrics_exit_batch,
};

void __init tcp_metrics_init(void)
{
        int ret;

        ret = register_pernet_subsys(&tcp_net_metrics_ops);
        if (ret < 0)
                panic("Could not allocate the tcp_metrics hash table\n");

        ret = genl_register_family(&tcp_metrics_nl_family);
        if (ret < 0)
                panic("Could not register tcp_metrics generic netlink\n");
}