tcp: Fix data-races around sysctl_tcp_recovery.
author     Kuniyuki Iwashima <kuniyu@amazon.com>
           Mon, 18 Jul 2022 17:26:46 +0000 (10:26 -0700)
committer  David S. Miller <davem@davemloft.net>
           Wed, 20 Jul 2022 09:14:50 +0000 (10:14 +0100)
sysctl_tcp_recovery can be changed concurrently while it is being read.
Thus, we need to add READ_ONCE() to its readers.
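
To illustrate the pattern outside the kernel, here is a minimal userspace
sketch (not the kernel code itself): the RACK flag values mirror
include/net/tcp.h, READ_ONCE()/WRITE_ONCE() below are simplified stand-ins
for the kernel macros, and sysctl_writer() plus the standalone tcp_is_rack()
are hypothetical helpers for the demo. One thread models the sysctl writer
updating a plain int while another tests a flag bit locklessly, which is the
kind of reader this patch annotates.

    /*
     * Userspace sketch of the race: a writer thread flips bits in a plain
     * int while a reader tests one bit without a lock.  The simplified
     * macros force a single volatile-qualified access per read/write.
     */
    #include <pthread.h>
    #include <stdio.h>

    #define READ_ONCE(x)        (*(const volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, val)  (*(volatile __typeof__(x) *)&(x) = (val))

    /* Flag values mirror include/net/tcp.h. */
    #define TCP_RACK_LOSS_DETECTION  0x1
    #define TCP_RACK_STATIC_REO_WND  0x2
    #define TCP_RACK_NO_DUPTHRESH    0x4

    static int sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;

    /* Reader side: without READ_ONCE() the compiler may refetch or tear the load. */
    static int tcp_is_rack(void)
    {
            return READ_ONCE(sysctl_tcp_recovery) & TCP_RACK_LOSS_DETECTION;
    }

    /* Writer side: models the sysctl handler storing new values concurrently. */
    static void *sysctl_writer(void *arg)
    {
            for (int i = 0; i < 1000; i++)
                    WRITE_ONCE(sysctl_tcp_recovery, i & 0x7);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;
            long hits = 0;

            pthread_create(&t, NULL, sysctl_writer, NULL);
            for (int i = 0; i < 1000; i++)
                    hits += tcp_is_rack();
            pthread_join(t, NULL);

            printf("RACK enabled on %ld of 1000 reads\n", hits);
            return 0;
    }

Built with something like gcc -O2 -pthread, the point is that without the
volatile-qualified access the compiler is free to tear, refetch, or cache the
load of sysctl_tcp_recovery; READ_ONCE() pins each read to one access, which
is what the hunks below add.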

Fixes: 4f41b1c58a32 ("tcp: use RACK to detect losses")
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/ipv4/tcp_input.c
net/ipv4/tcp_recovery.c

net/ipv4/tcp_input.c
index 92626e15115c558abc369563e34ce8b5868fae40..36eabd109e7c76e0eceabdfa0aa36f5300d5ef32 100644
@@ -2095,7 +2095,8 @@ static inline void tcp_init_undo(struct tcp_sock *tp)
 
 static bool tcp_is_rack(const struct sock *sk)
 {
-       return sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION;
+       return READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
+               TCP_RACK_LOSS_DETECTION;
 }
 
 /* If we detect SACK reneging, forget all SACK information
net/ipv4/tcp_recovery.c
index 48f30e7209f291c9191074e5f4a11501a374961c..50abaa941387d3e7328b6859ea5118937fc12be4 100644
@@ -14,7 +14,8 @@ static u32 tcp_rack_reo_wnd(const struct sock *sk)
                        return 0;
 
                if (tp->sacked_out >= tp->reordering &&
-                   !(sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_NO_DUPTHRESH))
+                   !(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
+                     TCP_RACK_NO_DUPTHRESH))
                        return 0;
        }
 
@@ -187,7 +188,8 @@ void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_STATIC_REO_WND ||
+       if ((READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
+            TCP_RACK_STATIC_REO_WND) ||
            !rs->prior_delivered)
                return;