tcp: add tp->dup_ack_counter
author     Eric Dumazet <edumazet@google.com>
           Thu, 30 Apr 2020 17:35:41 +0000 (10:35 -0700)
committer  David S. Miller <davem@davemloft.net>
           Thu, 30 Apr 2020 20:24:01 +0000 (13:24 -0700)
In commit 86de5921a3d5 ("tcp: defer SACK compression after DupThresh")
I added a TCP_FASTRETRANS_THRESH bias to tp->compressed_ack in order
to enable SACK compression only after 3 dupacks.
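
For reference, the old scheme overloaded tp->compressed_ack with both
roles. A minimal userspace sketch of that biased accounting (a bare
counter standing in for the real tcp_sock state, not kernel code):

	#include <stdio.h>

	#define TCP_FASTRETRANS_THRESH 3

	int main(void)
	{
		unsigned char compressed_ack = 0;
		int i, sent = 0, deferred = 0;

		for (i = 0; i < 10; i++) {
			/* Old scheme: one counter carried both roles. */
			if (++compressed_ack <= TCP_FASTRETRANS_THRESH)
				sent++;		/* dupack sent immediately */
			else
				deferred++;	/* ack held behind the hrtimer */
		}
		/* The MIB credit was compressed_ack - TCP_FASTRETRANS_THRESH. */
		printf("sent=%d deferred=%d mib=%d\n",
		       sent, deferred, compressed_ack - TCP_FASTRETRANS_THRESH);
		return 0;
	}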

Since we plan to relax this rule for flows involving stacks
that do not require it, this patch adds
a distinct tp->dup_ack_counter.

This means the TCP_FASTRETRANS_THRESH value is now used
in a single location that a future patch can adjust:

	if (tp->dup_ack_counter < TCP_FASTRETRANS_THRESH) {
		tp->dup_ack_counter++;
		goto send_now;
	}
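
With the two counters split, the decision becomes straightforward. A
minimal userspace sketch of the new accounting (ack_event() and
struct flow are illustrative names, not kernel APIs):

	#include <stdio.h>

	#define TCP_FASTRETRANS_THRESH 3

	struct flow {
		unsigned char dup_ack_counter;	/* dupacks sent immediately */
		unsigned char compressed_ack;	/* acks deferred behind the hrtimer */
	};

	/* Returns 1 if the ack must be sent now, 0 if it can be compressed. */
	static int ack_event(struct flow *f)
	{
		if (f->dup_ack_counter < TCP_FASTRETRANS_THRESH) {
			f->dup_ack_counter++;
			return 1;
		}
		f->compressed_ack++;
		return 0;
	}

	int main(void)
	{
		struct flow f = { 0, 0 };
		int i, sent = 0;

		for (i = 0; i < 10; i++)
			sent += ack_event(&f);

		/* First 3 acks go out immediately; the other 7 are deferred. */
		printf("sent=%d deferred=%d\n", sent, f.compressed_ack);
		return 0;
	}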

This patch also introduces a tcp_sack_compress_send_ack()
helper to make the following patch easier to understand.

This patch also refines LINUX_MIB_TCPACKCOMPRESSED so that it no
longer counts the ack we have to send when the timer expires
or when tcp_sack_compress_send_ack() sends an ack.
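
A worked example of the refined accounting, with an assumed value: if
tp->compressed_ack is 5 when the timer fires (or when
tcp_sack_compress_send_ack() runs), one of those five acks goes on the
wire anyway, so only four were really saved:

	#include <stdio.h>

	int main(void)
	{
		unsigned int compressed_ack = 5; /* acks deferred so far (assumed) */

		/* The ack we must send was not compressed away, so credit
		 * LINUX_MIB_TCPACKCOMPRESSED with one less than the counter.
		 */
		printf("TCPAckCompressed += %u\n", compressed_ack - 1);
		return 0;
	}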

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/tcp.h
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c

diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 421c99c..2c6f87e 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -268,6 +268,7 @@ struct tcp_sock {
        } rack;
        u16     advmss;         /* Advertised MSS                       */
        u8      compressed_ack;
+       u8      dup_ack_counter;
        u32     chrono_start;   /* Start time in jiffies of a TCP chrono */
        u32     chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
        u8      chrono_type:2,  /* current chronograph type */
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index bf4ced9..da777df 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4327,6 +4327,27 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
        }
 }
 
+static void tcp_sack_compress_send_ack(struct sock *sk)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       if (!tp->compressed_ack)
+               return;
+
+       if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
+               __sock_put(sk);
+
+       /* Since we have to send one ack anyway,
+        * subtract one from tp->compressed_ack to keep
+        * LINUX_MIB_TCPACKCOMPRESSED accurate.
+        */
+       NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
+                     tp->compressed_ack - 1);
+
+       tp->compressed_ack = 0;
+       tcp_send_ack(sk);
+}
+
 static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
 {
        struct tcp_sock *tp = tcp_sk(sk);
@@ -4355,8 +4376,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
         * If the sack array is full, forget about the last one.
         */
        if (this_sack >= TCP_NUM_SACKS) {
-               if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
-                       tcp_send_ack(sk);
+               tcp_sack_compress_send_ack(sk);
                this_sack--;
                tp->rx_opt.num_sacks--;
                sp--;
@@ -5275,15 +5295,13 @@ send_now:
 
        if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) {
                tp->compressed_ack_rcv_nxt = tp->rcv_nxt;
-               if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
-                       NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
-                                     tp->compressed_ack - TCP_FASTRETRANS_THRESH);
-               tp->compressed_ack = 0;
+               tp->dup_ack_counter = 0;
        }
-
-       if (++tp->compressed_ack <= TCP_FASTRETRANS_THRESH)
+       if (tp->dup_ack_counter < TCP_FASTRETRANS_THRESH) {
+               tp->dup_ack_counter++;
                goto send_now;
-
+       }
+       tp->compressed_ack++;
        if (hrtimer_is_queued(&tp->compressed_ack_timer))
                return;
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index ba44821..c414aeb 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -184,10 +184,10 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (unlikely(tp->compressed_ack > TCP_FASTRETRANS_THRESH)) {
+       if (unlikely(tp->compressed_ack)) {
                NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
-                             tp->compressed_ack - TCP_FASTRETRANS_THRESH);
-               tp->compressed_ack = TCP_FASTRETRANS_THRESH;
+                             tp->compressed_ack);
+               tp->compressed_ack = 0;
                if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
                        __sock_put(sk);
        }
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index c3f26dc..ada046f 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -753,8 +753,14 @@ static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
 
        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
-               if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
+               if (tp->compressed_ack) {
+                       /* Since we have to send one ack anyway,
+                        * subtract one from tp->compressed_ack to keep
+                        * LINUX_MIB_TCPACKCOMPRESSED accurate.
+                        */
+                       tp->compressed_ack--;
                        tcp_send_ack(sk);
+               }
        } else {
                if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
                                      &sk->sk_tsq_flags))