rxrpc: Generate extra pings for RTT during heavy-receive call
author David Howells <dhowells@redhat.com>
Mon, 17 Oct 2022 09:55:41 +0000 (10:55 +0100)
committer David Howells <dhowells@redhat.com>
Tue, 31 Jan 2023 16:38:10 +0000 (16:38 +0000)
When doing a call that has a single transmitted data packet and a
massive number of received data packets, we ping for only one RTT
sample, which means we don't get a good reading on it.

Fix this by converting occasional IDLE ACKs into PING ACKs to elicit a
response.
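
As a rough model of that decision (a standalone sketch, not the kernel
code: the rtt_count and rtt_last_req fields and the one-second
staleness window mirror the patch below; everything else is
hypothetical):

#include <stdio.h>
#include <stdint.h>

/* Which ACK to emit once more than two packets are unacked.  Prefer a
 * PING ACK, which demands a response and so yields an RTT sample,
 * while we have fewer than three samples or the last probe is over a
 * second old; otherwise fall back to a plain IDLE ACK. */
enum ack_choice { ACK_IDLE, ACK_PING };

static enum ack_choice choose_ack(unsigned int rtt_count,
				  int64_t rtt_last_req_ns, int64_t now_ns)
{
	if (rtt_count < 3)
		return ACK_PING;	/* still warming up the RTT estimate */
	if (rtt_last_req_ns + 1000000000LL < now_ns)
		return ACK_PING;	/* last probe over 1s old: refresh */
	return ACK_IDLE;		/* RTT data is fresh enough */
}

int main(void)
{
	/* Five samples, but the last probe was 2s ago: probe again. */
	printf("%s\n", choose_ack(5, 1000000000LL, 3000000000LL) == ACK_PING
	       ? "PING" : "IDLE");
	return 0;
}

With fewer than three samples the estimator has little to average, so
the probe is unconditional; after that, a probe is only forced when the
previous one has gone stale.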

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org

include/trace/events/rxrpc.h
net/rxrpc/call_event.c
net/rxrpc/output.c

diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index cdcadb1..450b8f3 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
        EM(rxrpc_propose_ack_client_tx_end,     "ClTxEnd") \
        EM(rxrpc_propose_ack_input_data,        "DataIn ") \
        EM(rxrpc_propose_ack_input_data_hole,   "DataInH") \
-       EM(rxrpc_propose_ack_ping_for_check_life, "ChkLife") \
        EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \
        EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
        EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
+       EM(rxrpc_propose_ack_ping_for_old_rtt,  "OldRtt ") \
        EM(rxrpc_propose_ack_ping_for_params,   "Params ") \
+       EM(rxrpc_propose_ack_ping_for_rtt,      "Rtt    ") \
        EM(rxrpc_propose_ack_processing_op,     "ProcOp ") \
        EM(rxrpc_propose_ack_respond_to_ack,    "Rsp2Ack") \
        EM(rxrpc_propose_ack_respond_to_ping,   "Rsp2Png") \
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 1abdef1..cf9799b 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -498,9 +498,18 @@ bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
                rxrpc_send_ACK(call, RXRPC_ACK_IDLE, 0,
                               rxrpc_propose_ack_rx_idle);
 
-       if (atomic_read(&call->ackr_nr_unacked) > 2)
-               rxrpc_send_ACK(call, RXRPC_ACK_IDLE, 0,
-                              rxrpc_propose_ack_input_data);
+       if (atomic_read(&call->ackr_nr_unacked) > 2) {
+               if (call->peer->rtt_count < 3)
+                       rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
+                                      rxrpc_propose_ack_ping_for_rtt);
+               else if (ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
+                                     ktime_get_real()))
+                       rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
+                                      rxrpc_propose_ack_ping_for_old_rtt);
+               else
+                       rxrpc_send_ACK(call, RXRPC_ACK_IDLE, 0,
+                                      rxrpc_propose_ack_input_data);
+       }
 
        /* Make sure the timer is restarted */
        if (!__rxrpc_call_is_complete(call)) {
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index a9746be..98b5d0d 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -253,12 +253,15 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
        iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);
        ret = do_udp_sendmsg(conn->local->socket, &msg, len);
        call->peer->last_tx_at = ktime_get_seconds();
-       if (ret < 0)
+       if (ret < 0) {
                trace_rxrpc_tx_fail(call->debug_id, serial, ret,
                                    rxrpc_tx_point_call_ack);
-       else
+       } else {
                trace_rxrpc_tx_packet(call->debug_id, &txb->wire,
                                      rxrpc_tx_point_call_ack);
+               if (txb->wire.flags & RXRPC_REQUEST_ACK)
+                       call->peer->rtt_last_req = ktime_get_real();
+       }
        rxrpc_tx_backoff(call, ret);
 
        if (!__rxrpc_call_is_complete(call)) {
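
For context, the two C hunks cooperate: output.c stamps
peer->rtt_last_req whenever a transmitted ACK carries
RXRPC_REQUEST_ACK, and call_event.c reads that stamp to decide whether
the RTT data has gone stale.  A minimal sketch of that interplay
(simplified, assumed types with a plain nanosecond clock; not the
kernel API):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct peer {
	int64_t rtt_last_req_ns;	/* last response-demanding ACK sent */
};

/* Transmit side (cf. rxrpc_send_ack_packet): only packets that demand
 * an ACK restart the staleness clock. */
static void stamp_on_tx(struct peer *p, bool request_ack, int64_t now_ns)
{
	if (request_ack)
		p->rtt_last_req_ns = now_ns;
}

/* Event side (cf. rxrpc_input_call_event): probe again once the last
 * response-demanding ACK is more than a second old. */
static bool rtt_stale(const struct peer *p, int64_t now_ns)
{
	return p->rtt_last_req_ns + 1000000000LL < now_ns;
}

int main(void)
{
	struct peer p = { 0 };

	stamp_on_tx(&p, true, 0);	/* PING ACK sent at t=0 */
	printf("stale at +0.5s? %d\n", rtt_stale(&p,  500000000LL));
	printf("stale at +1.5s? %d\n", rtt_stale(&p, 1500000000LL));
	return 0;
}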