rxrpc: Don't lock call->tx_lock to access call->tx_buffer
author     David Howells <dhowells@redhat.com>
           Mon, 17 Oct 2022 21:48:58 +0000 (22:48 +0100)
committer  David Howells <dhowells@redhat.com>
           Tue, 31 Jan 2023 16:38:35 +0000 (16:38 +0000)
call->tx_buffer is now only accessed within the I/O thread (sendmsg hands
packets over to the I/O thread via ->tx_sendmsg), so there's no need to
take a lock around accesses to it.
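
For background, the handoff that makes this safe looks, in miniature, like
the sketch below.  This is a userland analogue, not rxrpc code: the pthreads
mutex stands in for the sendmsg-side lock, "handoff" for call->tx_sendmsg
and "private_queue" for call->tx_buffer; the struct fields, function names
and the singly linked list are all hypothetical simplifications.

	/*
	 * Minimal single-consumer handoff sketch.  Producers append to a
	 * locked handoff list; one dedicated I/O thread splices that list
	 * into a private list that only it ever touches, so walking and
	 * freeing the private list needs no locking at all.
	 */
	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct txbuf {
		struct txbuf *next;
		int seq;
	};

	static pthread_mutex_t handoff_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct txbuf *handoff;		/* locked, like ->tx_sendmsg */
	static struct txbuf *private_queue;	/* I/O thread only, like ->tx_buffer */

	/* Producer side ("sendmsg"): queue a packet under the lock. */
	static void producer_queue(int seq)
	{
		struct txbuf *txb = malloc(sizeof(*txb));

		if (!txb)
			abort();
		txb->seq = seq;
		pthread_mutex_lock(&handoff_lock);
		txb->next = handoff;
		handoff = txb;
		pthread_mutex_unlock(&handoff_lock);
	}

	/* I/O thread: empty the handoff list in one short locked step... */
	static void io_thread_splice(void)
	{
		struct txbuf *list;

		pthread_mutex_lock(&handoff_lock);
		list = handoff;
		handoff = NULL;
		pthread_mutex_unlock(&handoff_lock);

		/* ...then move it onto the private queue, lock-free. */
		while (list) {
			struct txbuf *txb = list;

			list = list->next;
			txb->next = private_queue;
			private_queue = txb;
		}
	}

	/* I/O thread: free completed buffers; no lock, single consumer. */
	static void io_thread_shrink(void)
	{
		while (private_queue) {
			struct txbuf *txb = private_queue;

			private_queue = txb->next;
			printf("freeing seq %d\n", txb->seq);
			free(txb);
		}
	}

	int main(void)
	{
		producer_queue(1);
		producer_queue(2);
		io_thread_splice();
		io_thread_shrink();
		return 0;
	}

The point is that only the splice step needs the lock; once buffers sit on
the consumer's private list, the I/O thread can walk and free them with
plain loads and stores, which is exactly what the shrink loop in the diff
below now does.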

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org

net/rxrpc/txbuf.c

index d2cf2aac3adb0141c2e4ac4ae0466d3544119f8b..d43be85123864092b3654b06f886f19cc6e194ef 100644
@@ -110,12 +110,8 @@ void rxrpc_shrink_call_tx_buffer(struct rxrpc_call *call)
 
        _enter("%x/%x/%x", call->tx_bottom, call->acks_hard_ack, call->tx_top);
 
-       for (;;) {
-               spin_lock(&call->tx_lock);
-               txb = list_first_entry_or_null(&call->tx_buffer,
-                                              struct rxrpc_txbuf, call_link);
-               if (!txb)
-                       break;
+       while ((txb = list_first_entry_or_null(&call->tx_buffer,
+                                              struct rxrpc_txbuf, call_link))) {
                hard_ack = smp_load_acquire(&call->acks_hard_ack);
                if (before(hard_ack, txb->seq))
                        break;
@@ -128,15 +124,11 @@ void rxrpc_shrink_call_tx_buffer(struct rxrpc_call *call)
 
                trace_rxrpc_txqueue(call, rxrpc_txqueue_dequeue);
 
-               spin_unlock(&call->tx_lock);
-
                rxrpc_put_txbuf(txb, rxrpc_txbuf_put_rotated);
                if (after(call->acks_hard_ack, call->tx_bottom + 128))
                        wake = true;
        }
 
-       spin_unlock(&call->tx_lock);
-
        if (wake)
                wake_up(&call->waitq);
 }