tcp: add exponential backoff in __tcp_send_ack()
author Eric Dumazet <edumazet@google.com>
Wed, 30 Sep 2020 12:54:57 +0000 (05:54 -0700)
committer David S. Miller <davem@davemloft.net>
Wed, 30 Sep 2020 21:21:30 +0000 (14:21 -0700)
Whenever the host is under very high memory pressure,
__tcp_send_ack() skb allocation fails, and we set up
a 200 ms (TCP_DELACK_MAX) timer before retrying.

On hosts with a high number of TCP sockets, we can spend
a considerable amount of cpu cycles in these attempts,
adding high pressure on various spinlocks in the mm-layer,
ultimately blocking threads attempting to free space
from making any progress.

This patch adds standard exponential backoff to avoid
adding fuel to the fire.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/inet_connection_sock.h
net/ipv4/tcp_output.c

index 79875f976190750819948425e63dd0309c699050..7338b3865a2a3d278dc27c0167bba1b966bbda9f 100644 (file)
@@ -110,7 +110,7 @@ struct inet_connection_sock {
                __u8              pending;       /* ACK is pending                         */
                __u8              quick;         /* Scheduled number of quick acks         */
                __u8              pingpong;      /* The session is interactive             */
-               /* one byte hole. */
+               __u8              retry;         /* Number of attempts                     */
                __u32             ato;           /* Predicted tick of soft clock           */
                unsigned long     timeout;       /* Currently scheduled timeout            */
                __u32             lrcvtime;      /* timestamp of last received data packet */
@@ -199,6 +199,7 @@ static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
 #endif
        } else if (what == ICSK_TIME_DACK) {
                icsk->icsk_ack.pending = 0;
+               icsk->icsk_ack.retry = 0;
 #ifdef INET_CSK_CLEAR_TIMERS
                sk_stop_timer(sk, &icsk->icsk_delack_timer);
 #endif
index 6bd4e383030ea20441332a30e98fbda8cd90f84a..bf48cd73e96787a14b6f9af8beddb1067a7cb8dc 100644 (file)
@@ -3941,10 +3941,15 @@ void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
        buff = alloc_skb(MAX_TCP_HEADER,
                         sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
        if (unlikely(!buff)) {
+               struct inet_connection_sock *icsk = inet_csk(sk);
+               unsigned long delay;
+
+               delay = TCP_DELACK_MAX << icsk->icsk_ack.retry;
+               if (delay < TCP_RTO_MAX)
+                       icsk->icsk_ack.retry++;
                inet_csk_schedule_ack(sk);
-               inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
-               inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
-                                         TCP_DELACK_MAX, TCP_RTO_MAX);
+               icsk->icsk_ack.ato = TCP_ATO_MIN;
+               inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, delay, TCP_RTO_MAX);
                return;
        }