// SPDX-License-Identifier: GPL-2.0
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10,		/* Recurring Fast Open SYN losses */
		try_exp:2;		/* Request w/ exp. option (once) */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};

/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility
 * Kernel only stores RTT and RTTVAR in usec resolution
 */
#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)

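/* The two trimmed slots are TCP_METRIC_RTT_US and TCP_METRIC_RTTVAR_US,
 * which exist only in the netlink ABI: tcp_metrics_fill_info() below
 * derives them from the usec-resolution values already stored in the
 * kernel array, so no extra storage is needed for them here.
 */
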
struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	possible_net_t			tcpm_net;
	struct inetpeer_addr		tcpm_saddr;
	struct inetpeer_addr		tcpm_daddr;
	unsigned long			tcpm_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};

static inline struct net *tm_net(struct tcp_metrics_block *tm)
{
	return read_pnet(&tm->tcpm_net);
}

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	/* Paired with WRITE_ONCE() in tcpm_suck_dst() */
	return READ_ONCE(tm->tcpm_lock) & (1 << idx);
}

static u32 tcp_metric_get(const struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	/* Paired with WRITE_ONCE() in tcp_metric_set() */
	return READ_ONCE(tm->tcpm_vals[idx]);
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	/* Paired with READ_ONCE() in tcp_metric_get() */
	WRITE_ONCE(tm->tcpm_vals[idx], val);
}

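/* These accessors let metric readers run locklessly under RCU while a
 * writer updates the same slot: READ_ONCE()/WRITE_ONCE() keep each u32
 * load and store single-copy atomic, so a racing reader observes either
 * the old or the new value, never a torn one.
 */
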
static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	return (a->family == b->family) && !inetpeer_addr_cmp(a, b);
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static struct tcpm_hash_bucket	*tcp_metrics_hash __read_mostly;
static unsigned int		tcp_metrics_hash_log __read_mostly;

static DEFINE_SPINLOCK(tcp_metrics_lock);

static void tcpm_suck_dst(struct tcp_metrics_block *tm,
			  const struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 msval;
	u32 val;

	WRITE_ONCE(tm->tcpm_stamp, jiffies);

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	/* Paired with READ_ONCE() in tcp_metric_locked() */
	WRITE_ONCE(tm->tcpm_lock, val);

	msval = dst_metric_raw(dst, RTAX_RTT);
	tcp_metric_set(tm, TCP_METRIC_RTT, msval * USEC_PER_MSEC);

	msval = dst_metric_raw(dst, RTAX_RTTVAR);
	tcp_metric_set(tm, TCP_METRIC_RTTVAR, msval * USEC_PER_MSEC);
	tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
		       dst_metric_raw(dst, RTAX_SSTHRESH));
	tcp_metric_set(tm, TCP_METRIC_CWND,
		       dst_metric_raw(dst, RTAX_CWND));
	tcp_metric_set(tm, TCP_METRIC_REORDERING,
		       dst_metric_raw(dst, RTAX_REORDERING));
	if (fastopen_clear) {
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.try_exp = 0;
		tm->tcpm_fastopen.cookie.exp = false;
		tm->tcpm_fastopen.cookie.len = 0;
	}
}

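/* Import unit note: route metrics keep RTAX_RTT/RTAX_RTTVAR with
 * millisecond resolution, while this cache works in microseconds (see the
 * TCP_METRIC_MAX_KERNEL comment above), hence the msval * USEC_PER_MSEC
 * conversion whenever an entry is (re)primed from the route.
 */
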
#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm,
			     const struct dst_entry *dst)
{
	unsigned long limit;

	if (!tm)
		return;
	limit = READ_ONCE(tm->tcpm_stamp) + TCP_METRICS_TIMEOUT;
	if (unlikely(time_after(jiffies, limit)))
		tcpm_suck_dst(tm, dst, false);
}

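/* An entry that has not been touched for TCP_METRICS_TIMEOUT (one hour
 * with the definition above) is considered stale and is re-primed from
 * the current route metrics; the fastopen state is deliberately kept
 * (fastopen_clear == false).
 */
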
#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

#define deref_locked(p)	\
	rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))

static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *saddr,
					  struct inetpeer_addr *daddr,
					  unsigned int hash)
{
	struct tcp_metrics_block *tm;
	struct net *net;
	bool reclaim = false;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);

	/* While waiting for the spin-lock the cache might have been populated
	 * with this entry and so we have to check again.
	 */
	tm = __tcp_get_metrics(saddr, daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (tm) {
		tcpm_check_stamp(tm, dst);
		goto out_unlock;
	}

	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = deref_locked(tcp_metrics_hash[hash].chain);
		for (tm = deref_locked(oldest->tcpm_next); tm;
		     tm = deref_locked(tm->tcpm_next)) {
			if (time_before(READ_ONCE(tm->tcpm_stamp),
					READ_ONCE(oldest->tcpm_stamp)))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	write_pnet(&tm->tcpm_net, net);
	tm->tcpm_saddr = *saddr;
	tm->tcpm_daddr = *daddr;

	tcpm_suck_dst(tm, dst, true);

	if (likely(!reclaim)) {
		tm->tcpm_next = tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

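/* Allocation notes: kmalloc() uses GFP_ATOMIC because the BH-disabling
 * bucket spinlock is held. In the reclaim case the oldest entry of the
 * chain is recycled in place rather than unlinked, so concurrent RCU
 * readers walking the chain stay safe; they may transiently see a mix of
 * old and new values, which is acceptable for advisory metrics.
 */
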
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}

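/* Lookup results are encoded in the returned pointer: a real entry means
 * a hit, NULL means a miss on a short chain (the caller may allocate),
 * and the TCP_METRICS_RECLAIM_PTR sentinel means a miss on a chain deeper
 * than TCP_METRICS_RECLAIM_DEPTH, telling tcpm_new() to recycle the
 * oldest entry instead of growing the chain further.
 */
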
static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, saddr) &&
		    addr_same(&tm->tcpm_daddr, daddr) &&
		    net_eq(tm_net(tm), net))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	saddr.family = req->rsk_ops->family;
	daddr.family = req->rsk_ops->family;
	switch (daddr.family) {
	case AF_INET:
		inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
		inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
		hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
		inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}

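/* The bucket index mixes the per-address hash with net_hash_mix(net), a
 * per-namespace salt, before hash_32() folds it down to
 * tcp_metrics_hash_log bits; identical peers in different network
 * namespaces therefore typically land in distinct, hard-to-predict
 * buckets.
 */
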
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (sk->sk_family == AF_INET) {
		inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
		inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
		hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (sk->sk_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
			inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
			inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
			hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
		} else {
			inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
			inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR)
		tm = NULL;
	if (!tm && create)
		tm = tcpm_new(dst, &saddr, &daddr, hash);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}

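/* A v4-mapped IPv6 destination (e.g. ::ffff:192.0.2.1) is deliberately
 * keyed by its embedded IPv4 address above, so an AF_INET6 socket talking
 * to an IPv4 peer shares one cache entry with plain AF_INET sockets.
 */
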
/* Save metrics learned by this TCP session. This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	sk_dst_confirm(sk);
	if (READ_ONCE(net->ipv4.sysctl_tcp_nometrics_save) || !dst)
		return;

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt_us) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time. Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt_us;

	/* If newly calculated rtt larger than stored one, store new
	 * one. Otherwise, use EWMA. Remember, rtt overestimation is
	 * always better than underestimation.
	 */
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt_us;
		else
			rtt -= (m >> 3);
		tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev_us)
			m = tp->mdev_us;

		var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
	}

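	/* Worked example of the RTT rule above, in tp->srtt_us fixed point:
	 * a cached rtt of 800000 and a fresh tp->srtt_us of 640000 give
	 * m = 160000, so the cache moves down by m >> 3 = 20000 to 780000,
	 * i.e. an EWMA with gain 1/8. Had the fresh sample been the larger
	 * one, the cache would have jumped straight to it (the m <= 0
	 * branch), which is the "overestimation is better" policy from the
	 * comment above.
	 */
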
	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tcp_snd_cwnd(tp) >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tcp_snd_cwnd(tp) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tcp_snd_cwnd(tp) > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tcp_snd_cwnd(tp));
		}
	} else if (!tcp_in_slow_start(tp) &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tcp_snd_cwnd(tp) >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tcp_snd_cwnd(tp)) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is non-sense,
		 * ssthresh may be also invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering !=
			    READ_ONCE(net->ipv4.sysctl_tcp_reordering))
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	WRITE_ONCE(tm->tcpm_stamp, jiffies);
out_unlock:
	rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	sk_dst_confirm(sk);
	if (!dst)
		goto reset;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) ?
	      0 : tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS. Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val)
		tp->reordering = val;

	crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
	 * to seed the RTO for later data packets because SYN packets are
	 * small. Use the per-dst cached values to seed the RTO but keep
	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
	 * Later the RTO will be updated immediately upon obtaining the first
	 * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
	 * influences the first RTO but not later RTT estimation.
	 *
	 * But if RTT is not available from the SYN (due to retransmits or
	 * syn cookies) or the cache, force a conservative 3secs timeout.
	 *
	 * A bit of theory: RTT is the time that passes after a "normal"
	 * sized packet is sent until it is ACKed. In normal circumstances
	 * sending small packets forces the peer to delay ACKs and the
	 * calculation is correct too. The algorithm is adaptive and,
	 * provided we follow the specs, it NEVER underestimates RTT. BUT!
	 * If the peer tries some clever trick such as "quick acks" for long
	 * enough to drive the measured RTT down to a low value, and then
	 * abruptly stops doing so and starts delaying ACKs, expect trouble.
	 */
	if (crtt > tp->srtt_us) {
		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
		crtt /= 8 * USEC_PER_SEC / HZ;
		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
	} else if (tp->srtt_us == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS. This is most likely due to retransmission,
		 * including spurious one. Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
		tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;

		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
}

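/* Example of the RTO seeding math above: crtt is in the same fixed point
 * as tp->srtt_us (usec scaled by 8), so with HZ == 1000 the divisor
 * 8 * USEC_PER_SEC / HZ is 8000 and a cached crtt of 800000 (100 ms)
 * becomes 100 jiffies; icsk_rto is then crtt + max(2 * crtt,
 * tcp_rto_min(sk)), with 2 * crtt standing in as a conservative guess
 * for the missing variance term of tcp_rtt_estimator().
 */
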
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (tm && tcp_metric_get(tm, TCP_METRIC_RTT))
		ret = true;
	else
		ret = false;
	rcu_read_unlock();

	return ret;
}

static DEFINE_SEQLOCK(fastopen_seqlock);

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			if (cookie->len <= 0 && tfom->try_exp == 1)
				cookie->exp = true;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}

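/* The read side uses the classic seqlock retry idiom: the loop body may
 * execute more than once, so it must only copy data out (no side
 * effects); the copies are known consistent once read_seqretry() reports
 * that no writer ran concurrently.
 */
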
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;
	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		if (mss)
			tfom->mss = mss;
		if (cookie && cookie->len > 0)
			tfom->cookie = *cookie;
		else if (try_exp > tfom->try_exp &&
			 tfom->cookie.len <= 0 && !tfom->cookie.exp)
			tfom->try_exp = try_exp;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}

static struct genl_family tcp_metrics_nl_family;

static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* Following attributes are not received for GET/DEL,
	 * we keep them for reference
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_daddr.family) {
	case AF_INET:
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - READ_ONCE(tm->tcpm_stamp),
			  TCP_METRICS_ATTR_PAD) < 0)
		goto nla_put_failure;

	{
		int n = 0;

		nest = nla_nest_start_noflag(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
			u32 val = tcp_metric_get(tm, i);

			if (!val)
				continue;
			if (i == TCP_METRIC_RTT) {
				if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (i == TCP_METRIC_RTTVAR) {
				if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (nla_put_u32(msg, i + 1, val) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

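	/* Attribute ids inside TCP_METRICS_ATTR_VALS are the enum
	 * tcp_metric_index value + 1, since attribute 0 is reserved as
	 * "unspec". RTT and RTTVAR are exported twice: raw microseconds
	 * under the *_US ids and a millisecond approximation (clamped to
	 * at least 1) under the legacy ids.
	 */
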
	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				 tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				   jiffies - tfom->last_syn_loss,
				   TCP_METRICS_ATTR_PAD) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (!net_eq(tm_net(tm), net))
				continue;
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}

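/* Dump resume state lives in cb->args: args[0] is the next hash row and
 * args[1] the position within that row's chain, so a dump interrupted by
 * a full skb continues where it left off on the next callback.
 */
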
static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			   unsigned int *hash, int optional, int v4, int v6)
{
	struct nlattr *a;

	a = info->attrs[v4];
	if (a) {
		inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
		if (hash)
			*hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
		return 0;
	}
	a = info->attrs[v6];
	if (a) {
		struct in6_addr in6;

		if (nla_len(a) != sizeof(struct in6_addr))
			return -EINVAL;
		in6 = nla_get_in6_addr(a);
		inetpeer_set_addr_v6(addr, &in6);
		if (hash)
			*hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}

static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	return __parse_nl_addr(info, addr, hash, optional,
			       TCP_METRICS_ATTR_ADDR_IPV4,
			       TCP_METRICS_ATTR_ADDR_IPV6);
}

static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
	return __parse_nl_addr(info, addr, NULL, 0,
			       TCP_METRICS_ATTR_SADDR_IPV4,
			       TCP_METRICS_ATTR_SADDR_IPV6);
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;
	bool src = true;

	ret = parse_nl_addr(info, &daddr, &hash, 0);
	if (ret < 0)
		return ret;

	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}

static void tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		struct tcp_metrics_block __rcu **pp;
		bool match;

		spin_lock_bh(&tcp_metrics_lock);
		pp = &hb->chain;
		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
			match = net ? net_eq(tm_net(tm), net) :
				!refcount_read(&tm_net(tm)->ns.count);
			if (match) {
				rcu_assign_pointer(*pp, tm->tcpm_next);
				kfree_rcu(tm, rcu_head);
			} else {
				pp = &tm->tcpm_next;
			}
		}
		spin_unlock_bh(&tcp_metrics_lock);
	}
}

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;
	bool src = true, found = false;

	ret = parse_nl_addr(info, &daddr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0) {
		tcp_metrics_flush_all(net);
		return 0;
	}
	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	hb = tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			rcu_assign_pointer(*pp, tm->tcpm_next);
			kfree_rcu(tm, rcu_head);
			found = true;
		} else {
			pp = &tm->tcpm_next;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!found)
		return -ESRCH;
	return 0;
}

static const struct genl_small_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = tcp_metrics_nl_cmd_del,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family tcp_metrics_nl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.policy = tcp_metrics_nl_policy,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.small_ops	= tcp_metrics_nl_ops,
	.n_small_ops	= ARRAY_SIZE(tcp_metrics_nl_ops),
	.resv_start_op	= TCP_METRICS_CMD_DEL + 1,
};

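/* The usual userspace consumer of this family is iproute2's
 * "ip tcp_metrics" command; e.g. "ip tcp_metrics show" issues
 * TCP_METRICS_CMD_GET dumps and "ip tcp_metrics flush" a CMD_DEL with no
 * address (illustrative usage; see the ip-tcp_metrics(8) man page).
 */
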
static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

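/* With the __setup() hook above, the table size can be forced from the
 * kernel command line, e.g. booting with "tcpmhash_entries=16384"
 * (illustrative value; it is rounded up to a power of two below via
 * order_base_2()).
 */
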
static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	if (!net_eq(net, &init_net))
		return 0;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages() >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;

	tcp_metrics_hash = kvzalloc(size, GFP_KERNEL);
	if (!tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}

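/* Sizing example: totalram_pages() >= 128 * 1024 corresponds to roughly
 * 512 MB assuming 4 KB pages, selecting 16K hash slots; smaller systems
 * get 8K. Each bucket is a single RCU pointer, so 16K slots cost about
 * 128 KB on a 64-bit build. The table is global and only allocated for
 * the initial namespace; all netns share it, keyed by tcpm_net.
 */
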
static void __net_exit tcp_net_metrics_exit_batch(struct list_head *net_exit_list)
{
	tcp_metrics_flush_all(NULL);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init		=	tcp_net_metrics_init,
	.exit_batch	=	tcp_net_metrics_exit_batch,
};

void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		panic("Could not allocate the tcp_metrics hash table\n");

	ret = genl_register_family(&tcp_metrics_nl_family);
	if (ret < 0)
		panic("Could not register tcp_metrics generic netlink\n");
}