tcp: use hash_32() in tcp_metrics
net/ipv4/tcp_metrics.c
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>

int sysctl_tcp_nometrics_save __read_mostly;

enum tcp_metric_index {
        TCP_METRIC_RTT,
        TCP_METRIC_RTTVAR,
        TCP_METRIC_SSTHRESH,
        TCP_METRIC_CWND,
        TCP_METRIC_REORDERING,

        /* Always last.  */
        TCP_METRIC_MAX,
};

struct tcp_fastopen_metrics {
        u16     mss;
        u16     syn_loss:10;            /* Recurring Fast Open SYN losses */
        unsigned long   last_syn_loss;  /* Last Fast Open SYN loss */
        struct  tcp_fastopen_cookie     cookie;
};

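/* One cached entry per remote peer.  Entries hang off a per-namespace
 * hash table (net->ipv4.tcp_metrics_hash) and are recycled
 * oldest-first once a chain exceeds TCP_METRICS_RECLAIM_DEPTH.
 */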
struct tcp_metrics_block {
        struct tcp_metrics_block __rcu  *tcpm_next;
        struct inetpeer_addr            tcpm_addr;
        unsigned long                   tcpm_stamp;
        u32                             tcpm_ts;
        u32                             tcpm_ts_stamp;
        u32                             tcpm_lock;
        u32                             tcpm_vals[TCP_METRIC_MAX];
        struct tcp_fastopen_metrics     tcpm_fastopen;
};

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
                              enum tcp_metric_index idx)
{
        return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
                          enum tcp_metric_index idx)
{
        return tm->tcpm_vals[idx];
}

static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
                                  enum tcp_metric_index idx)
{
        return msecs_to_jiffies(tm->tcpm_vals[idx]);
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
                           enum tcp_metric_index idx,
                           u32 val)
{
        tm->tcpm_vals[idx] = val;
}

static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
                                 enum tcp_metric_index idx,
                                 u32 val)
{
        tm->tcpm_vals[idx] = jiffies_to_msecs(val);
}

static bool addr_same(const struct inetpeer_addr *a,
                      const struct inetpeer_addr *b)
{
        const struct in6_addr *a6, *b6;

        if (a->family != b->family)
                return false;
        if (a->family == AF_INET)
                return a->addr.a4 == b->addr.a4;

        a6 = (const struct in6_addr *) &a->addr.a6[0];
        b6 = (const struct in6_addr *) &b->addr.a6[0];

        return ipv6_addr_equal(a6, b6);
}

struct tcpm_hash_bucket {
        struct tcp_metrics_block __rcu  *chain;
};

static DEFINE_SPINLOCK(tcp_metrics_lock);

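/* Pull the route's current metrics into @tm, recording which of them
 * the route has locked against updates, and reset the cached
 * timestamp and Fast Open state.
 */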
static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
        u32 val;

        /* Refresh the stamp, so a timed-out entry is not re-pulled from
         * the route by tcpm_check_stamp() on every subsequent lookup.
         */
        tm->tcpm_stamp = jiffies;

        val = 0;
        if (dst_metric_locked(dst, RTAX_RTT))
                val |= 1 << TCP_METRIC_RTT;
        if (dst_metric_locked(dst, RTAX_RTTVAR))
                val |= 1 << TCP_METRIC_RTTVAR;
        if (dst_metric_locked(dst, RTAX_SSTHRESH))
                val |= 1 << TCP_METRIC_SSTHRESH;
        if (dst_metric_locked(dst, RTAX_CWND))
                val |= 1 << TCP_METRIC_CWND;
        if (dst_metric_locked(dst, RTAX_REORDERING))
                val |= 1 << TCP_METRIC_REORDERING;
        tm->tcpm_lock = val;

        tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
        tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
        tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
        tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
        tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
        tm->tcpm_ts = 0;
        tm->tcpm_ts_stamp = 0;
        tm->tcpm_fastopen.mss = 0;
        tm->tcpm_fastopen.syn_loss = 0;
        tm->tcpm_fastopen.cookie.len = 0;
}

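/* Under tcp_metrics_lock, either allocate a fresh entry and link it
 * into the bucket, or (on reclaim) reuse the least recently stamped
 * block already on the chain.
 */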
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
                                          struct inetpeer_addr *addr,
                                          unsigned int hash,
                                          bool reclaim)
{
        struct tcp_metrics_block *tm;
        struct net *net;

        spin_lock_bh(&tcp_metrics_lock);
        net = dev_net(dst->dev);
        if (unlikely(reclaim)) {
                struct tcp_metrics_block *oldest;

                oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
                for (tm = rcu_dereference(oldest->tcpm_next); tm;
                     tm = rcu_dereference(tm->tcpm_next)) {
                        if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
                                oldest = tm;
                }
                tm = oldest;
        } else {
                tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
                if (!tm)
                        goto out_unlock;
        }
        tm->tcpm_addr = *addr;
        tm->tcpm_stamp = jiffies;

        tcpm_suck_dst(tm, dst);

        if (likely(!reclaim)) {
                tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
                rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
        }

out_unlock:
        spin_unlock_bh(&tcp_metrics_lock);
        return tm;
}

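/* Cached metrics older than an hour are refreshed from the route
 * before use.
 */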
#define TCP_METRICS_TIMEOUT             (60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
        if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
                tcpm_suck_dst(tm, dst);
}

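/* A lookup that walks more than TCP_METRICS_RECLAIM_DEPTH entries
 * without a match returns a sentinel instead of NULL, telling the
 * caller to recycle an existing block rather than grow the chain.
 */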
#define TCP_METRICS_RECLAIM_DEPTH       5
#define TCP_METRICS_RECLAIM_PTR         (struct tcp_metrics_block *) 0x1UL

static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
        if (tm)
                return tm;
        if (depth > TCP_METRICS_RECLAIM_DEPTH)
                return TCP_METRICS_RECLAIM_PTR;
        return NULL;
}

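/* Chain walk under rcu_read_lock(); returns the match, NULL, or the
 * reclaim sentinel via tcp_get_encode().
 */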
static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
                                                   struct net *net, unsigned int hash)
{
        struct tcp_metrics_block *tm;
        int depth = 0;

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, addr))
                        break;
                depth++;
        }
        return tcp_get_encode(tm, depth);
}

static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
                                                       struct dst_entry *dst)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net;

        addr.family = req->rsk_ops->family;
        switch (addr.family) {
        case AF_INET:
                addr.addr.a4 = inet_rsk(req)->rmt_addr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
        case AF_INET6:
                *(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
                hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr);
                break;
        default:
                return NULL;
        }

        net = dev_net(dst->dev);
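        /* hash_32() (the point of this patch) multiplies by a
         * golden-ratio constant and keeps the top tcp_metrics_hash_log
         * bits of the product, spreading low-entropy address bits
         * across the buckets.
         */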
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, &addr))
                        break;
        }
        tcpm_check_stamp(tm, dst);
        return tm;
}

static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
        struct inet6_timewait_sock *tw6;
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net;

        addr.family = tw->tw_family;
        switch (addr.family) {
        case AF_INET:
                addr.addr.a4 = tw->tw_daddr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
        case AF_INET6:
                tw6 = inet6_twsk((struct sock *)tw);
                *(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr;
                hash = ipv6_addr_hash(&tw6->tw_v6_daddr);
                break;
        default:
                return NULL;
        }

        net = twsk_net(tw);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, &addr))
                        break;
        }
        return tm;
}

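/* Lookup used by full sockets.  With @create, a miss (or the reclaim
 * sentinel from a too-deep chain) is turned into a new or recycled
 * entry via tcpm_new().
 */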
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
                                                 struct dst_entry *dst,
                                                 bool create)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net;
        bool reclaim;

        addr.family = sk->sk_family;
        switch (addr.family) {
        case AF_INET:
                addr.addr.a4 = inet_sk(sk)->inet_daddr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
        case AF_INET6:
                *(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
                hash = ipv6_addr_hash(&inet6_sk(sk)->daddr);
                break;
        default:
                return NULL;
        }

        net = dev_net(dst->dev);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

        tm = __tcp_get_metrics(&addr, net, hash);
        reclaim = false;
        if (tm == TCP_METRICS_RECLAIM_PTR) {
                reclaim = true;
                tm = NULL;
        }
        if (!tm && create)
                tm = tcpm_new(dst, &addr, hash, reclaim);
        else
                tcpm_check_stamp(tm, dst);

        return tm;
}

/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_metrics_block *tm;
        unsigned long rtt;
        u32 val;
        int m;

        if (sysctl_tcp_nometrics_save || !dst)
                return;

        if (dst->flags & DST_HOST)
                dst_confirm(dst);

        rcu_read_lock();
        if (icsk->icsk_backoff || !tp->srtt) {
                /* This session failed to estimate rtt. Why?
                 * Probably, no packets returned in time.  Reset our
                 * results.
                 */
                tm = tcp_get_metrics(sk, dst, false);
                if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
                        tcp_metric_set(tm, TCP_METRIC_RTT, 0);
                goto out_unlock;
        } else
                tm = tcp_get_metrics(sk, dst, true);

        if (!tm)
                goto out_unlock;

        rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
        m = rtt - tp->srtt;

        /* If the newly calculated rtt is larger than the stored one,
         * store the new one. Otherwise, use EWMA. Remember, rtt
         * overestimation is always better than underestimation.
         */
        if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
                if (m <= 0)
                        rtt = tp->srtt;
                else
                        rtt -= (m >> 3);
                tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
        }

        if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
                unsigned long var;

                if (m < 0)
                        m = -m;

                /* Scale deviation to rttvar fixed point */
                m >>= 1;
                if (m < tp->mdev)
                        m = tp->mdev;

                var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
                if (m >= var)
                        var = m;
                else
                        var -= (var - m) >> 2;

                tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
        }

        if (tcp_in_initial_slowstart(tp)) {
                /* Slow start still did not finish. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && (tp->snd_cwnd >> 1) > val)
                                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                               tp->snd_cwnd >> 1);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        if (tp->snd_cwnd > val)
                                tcp_metric_set(tm, TCP_METRIC_CWND,
                                               tp->snd_cwnd);
                }
        } else if (tp->snd_cwnd > tp->snd_ssthresh &&
                   icsk->icsk_ca_state == TCP_CA_Open) {
                /* Cong. avoidance phase, cwnd is reliable. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
                        tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
                }
        } else {
                /* Else slow start did not finish, cwnd is meaningless
                 * and ssthresh may be invalid as well.
                 */
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        tcp_metric_set(tm, TCP_METRIC_CWND,
                                       (val + tp->snd_ssthresh) >> 1);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && tp->snd_ssthresh > val)
                                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                               tp->snd_ssthresh);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
                        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
                        if (val < tp->reordering &&
                            tp->reordering != sysctl_tcp_reordering)
                                tcp_metric_set(tm, TCP_METRIC_REORDERING,
                                               tp->reordering);
                }
        }
        tm->tcpm_stamp = jiffies;
out_unlock:
        rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_metrics_block *tm;
        u32 val;

        if (dst == NULL)
                goto reset;

        dst_confirm(dst);

        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (!tm) {
                rcu_read_unlock();
                goto reset;
        }

        if (tcp_metric_locked(tm, TCP_METRIC_CWND))
                tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
        if (val) {
                tp->snd_ssthresh = val;
                if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
                        tp->snd_ssthresh = tp->snd_cwnd_clamp;
        } else {
                /* ssthresh may have been reduced unnecessarily during
                 * 3WHS. Restore it back to its initial default.
                 */
                tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        }
        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
        if (val && tp->reordering != val) {
                tcp_disable_fack(tp);
                tcp_disable_early_retrans(tp);
                tp->reordering = val;
        }

        val = tcp_metric_get(tm, TCP_METRIC_RTT);
        if (val == 0 || tp->srtt == 0) {
                rcu_read_unlock();
                goto reset;
        }
        /* The initial rtt is determined from the SYN,SYN-ACK exchange.
         * Those segments are small, so the measured rtt may appear much
         * smaller than the real one. Use the per-dst memory to make it
         * more realistic.
         *
         * A bit of theory. RTT is the time that passes after a "normal"
         * sized packet is sent until it is ACKed. In normal circumstances
         * sending small packets forces the peer to delay ACKs, so the
         * calculation is correct there too. The algorithm is adaptive
         * and, provided we follow the specs, it never underestimates RTT.
         * But if the peer pulls tricks, sending quick ACKs for long
         * enough to drive the RTT estimate down and then abruptly
         * switching to delayed ACKs, expect trouble.
         */
        val = msecs_to_jiffies(val);
        if (val > tp->srtt) {
                tp->srtt = val;
                tp->rtt_seq = tp->snd_nxt;
        }
        val = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
        if (val > tp->mdev) {
                tp->mdev = val;
                tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
        }
        rcu_read_unlock();

        tcp_set_rto(sk);
reset:
        if (tp->srtt == 0) {
                /* RFC6298: 5.7 We've failed to get a valid RTT sample from
                 * the 3WHS. This is most likely due to retransmission,
                 * including a spurious one. Reset the RTO back to 3 secs
                 * from the more aggressive 1 sec to avoid more spurious
                 * retransmission.
                 */
                tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
                inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
        }
        /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
         * retransmitted. In light of RFC6298's more aggressive 1 sec
         * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
         * retransmission has occurred.
         */
        if (tp->total_retrans > 1)
                tp->snd_cwnd = 1;
        else
                tp->snd_cwnd = tcp_init_cwnd(tp, dst);
        tp->snd_cwnd_stamp = tcp_time_stamp;
}

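/* With @paws_check, refuse the peer when its cached timestamp is
 * recent but ahead of the timestamp in the request, which would trip
 * PAWS; otherwise a peer is "proven" only if we have both a cached
 * RTT and a cached timestamp for it.
 */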
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
{
        struct tcp_metrics_block *tm;
        bool ret;

        if (!dst)
                return false;

        rcu_read_lock();
        tm = __tcp_get_metrics_req(req, dst);
        if (paws_check) {
                if (tm &&
                    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
                    (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
                        ret = false;
                else
                        ret = true;
        } else {
                if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
                        ret = true;
                else
                        ret = false;
        }
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);

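/* Seed a new connection's ts_recent from the cached value, provided
 * the cache entry is no older than TCP_PAWS_MSL.
 */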
void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (tm) {
                struct tcp_sock *tp = tcp_sk(sk);

                if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
                        tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
                        tp->rx_opt.ts_recent = tm->tcpm_ts;
                }
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);

/* VJ's idea. Save the last timestamp seen from this destination and
 * hold it at least for the normal timewait interval, to use for
 * duplicate segment detection in subsequent connections, before they
 * enter synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        bool ret = false;

        if (dst) {
                struct tcp_metrics_block *tm;

                rcu_read_lock();
                tm = tcp_get_metrics(sk, dst, true);
                if (tm) {
                        struct tcp_sock *tp = tcp_sk(sk);

                        if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
                            ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
                             tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
                                tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
                                tm->tcpm_ts = tp->rx_opt.ts_recent;
                        }
                        ret = true;
                }
                rcu_read_unlock();
        }
        return ret;
}

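/* TIME-WAIT counterpart of tcp_remember_stamp(): fold the timewait
 * socket's ts_recent into the cache when it is newer than the stored
 * value.
 */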
bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
        struct tcp_metrics_block *tm;
        bool ret = false;

        rcu_read_lock();
        tm = __tcp_get_metrics_tw(tw);
        if (tm) {
                const struct tcp_timewait_sock *tcptw;
                struct sock *sk = (struct sock *) tw;

                tcptw = tcp_twsk(sk);
                if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
                    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
                     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
                        tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
                        tm->tcpm_ts        = tcptw->tw_ts_recent;
                }
                ret = true;
        }
        rcu_read_unlock();

        return ret;
}

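/* Fast Open metrics are read without the metrics spinlock; this
 * seqlock lets readers detect a concurrent update and retry.
 */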
static DEFINE_SEQLOCK(fastopen_seqlock);

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
                            struct tcp_fastopen_cookie *cookie,
                            int *syn_loss, unsigned long *last_syn_loss)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
        if (tm) {
                struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
                unsigned int seq;

                do {
                        seq = read_seqbegin(&fastopen_seqlock);
                        if (tfom->mss)
                                *mss = tfom->mss;
                        *cookie = tfom->cookie;
                        *syn_loss = tfom->syn_loss;
                        *last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
                } while (read_seqretry(&fastopen_seqlock, seq));
        }
        rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
                            struct tcp_fastopen_cookie *cookie, bool syn_lost)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, __sk_dst_get(sk), true);
        if (tm) {
                struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

                write_seqlock_bh(&fastopen_seqlock);
                tfom->mss = mss;
                if (cookie->len > 0)
                        tfom->cookie = *cookie;
                if (syn_lost) {
                        ++tfom->syn_loss;
                        tfom->last_syn_loss = jiffies;
                } else
                        tfom->syn_loss = 0;
                write_sequnlock_bh(&fastopen_seqlock);
        }
        rcu_read_unlock();
}

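/* "tcpmhash_entries=N" on the kernel command line overrides the
 * memory-based sizing of the hash table done below.
 */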
static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
        ssize_t ret;

        if (!str)
                return 0;

        ret = kstrtouint(str, 0, &tcpmhash_entries);
        if (ret)
                return 0;

        return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

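/* Size the per-namespace table from available RAM unless overridden:
 * 16K slots when totalram_pages >= 128K (512 MB with 4 KB pages), 8K
 * otherwise.  order_base_2() records log2(slots) for use by hash_32().
 */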
static int __net_init tcp_net_metrics_init(struct net *net)
{
        size_t size;
        unsigned int slots;

        slots = tcpmhash_entries;
        if (!slots) {
                if (totalram_pages >= 128 * 1024)
                        slots = 16 * 1024;
                else
                        slots = 8 * 1024;
        }

        net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
        size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;

        net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL);
        if (!net->ipv4.tcp_metrics_hash)
                return -ENOMEM;

        return 0;
}

static void __net_exit tcp_net_metrics_exit(struct net *net)
{
        kfree(net->ipv4.tcp_metrics_hash);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
        .init   =       tcp_net_metrics_init,
        .exit   =       tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
        register_pernet_subsys(&tcp_net_metrics_ops);
}