rxrpc: Remove RCU from peer->error_targets list
author    David Howells <dhowells@redhat.com>
          Wed, 12 Oct 2022 14:42:06 +0000 (15:42 +0100)
committer David Howells <dhowells@redhat.com>
          Thu, 1 Dec 2022 13:36:41 +0000 (13:36 +0000)
Remove the RCU requirements from the peer's list of error targets so that
the error distributor can call sleeping functions.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
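
Background on the constraint being lifted: an hlist_for_each_entry_rcu()
walk has to run inside an RCU read-side critical section, and blocking is
not allowed there, so everything called from the distributor's loop body
had to be atomic.  A sketch of the old shape (illustrative only; the
explicit rcu_read_lock() may equally be held by a caller):

	static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
					   enum rxrpc_call_completion compl)
	{
		struct rxrpc_call *call;

		rcu_read_lock();
		hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
			/* Nothing called from here may sleep. */
			rxrpc_set_call_completion(call, compl, 0, -error);
		}
		rcu_read_unlock();
	}

Switching the list to plain spinlock discipline removes that restriction
at the cost of managing peer->lock explicitly, as the peer_event.c hunk
below shows.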

net/rxrpc/call_accept.c
net/rxrpc/call_object.c
net/rxrpc/conn_client.c
net/rxrpc/conn_object.c
net/rxrpc/output.c
net/rxrpc/peer_event.c

diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 8bc327a..5f978b0 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -433,6 +433,12 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
         */
        rxrpc_put_call(call, rxrpc_call_put_discard_prealloc);
 
+       if (hlist_unhashed(&call->error_link)) {
+               spin_lock(&call->peer->lock);
+               hlist_add_head(&call->error_link, &call->peer->error_targets);
+               spin_unlock(&call->peer->lock);
+       }
+
        _leave(" = %p{%d}", call, call->debug_id);
        return call;
 
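The hlist_unhashed() guard added above relies on standard hlist
semantics: a node initialised with INIT_HLIST_NODE() (or zero-filled at
allocation) reads as unhashed, and the hlist_del_init() used in
conn_object.c below returns it to that state, so the check-then-add can
be repeated safely.  The patch tests before taking the lock, which works
when no other path can add the same call's error_link concurrently; the
general-purpose form makes the test under the lock, roughly:

	spin_lock(&call->peer->lock);
	if (hlist_unhashed(&call->error_link))
		hlist_add_head(&call->error_link,
			       &call->peer->error_targets);
	spin_unlock(&call->peer->lock);
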
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 96a7edd..7570b4e 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -442,7 +442,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
        rcu_assign_pointer(conn->channels[chan].call, call);
 
        spin_lock(&conn->peer->lock);
-       hlist_add_head_rcu(&call->error_link, &conn->peer->error_targets);
+       hlist_add_head(&call->error_link, &conn->peer->error_targets);
        spin_unlock(&conn->peer->lock);
 
        rxrpc_start_call_timer(call);
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index ab3dd22..3c7b1bd 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -786,6 +786,10 @@ void rxrpc_expose_client_call(struct rxrpc_call *call)
                if (chan->call_counter >= INT_MAX)
                        set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
                trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
+
+               spin_lock(&call->peer->lock);
+               hlist_add_head(&call->error_link, &call->peer->error_targets);
+               spin_unlock(&call->peer->lock);
        }
 }
 
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index c2e05ea..5a39255 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -215,9 +215,9 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
        call->peer->cong_ssthresh = call->cong_ssthresh;
 
        if (!hlist_unhashed(&call->error_link)) {
-               spin_lock_bh(&call->peer->lock);
-               hlist_del_rcu(&call->error_link);
-               spin_unlock_bh(&call->peer->lock);
+               spin_lock(&call->peer->lock);
+               hlist_del_init(&call->error_link);
+               spin_unlock(&call->peer->lock);
        }
 
        if (rxrpc_is_client_call(call))
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index c8147e5..71963b4 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -394,12 +394,6 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
 
        _enter("%x,{%d}", txb->seq, txb->len);
 
-       if (hlist_unhashed(&call->error_link)) {
-               spin_lock_bh(&call->peer->lock);
-               hlist_add_head_rcu(&call->error_link, &call->peer->error_targets);
-               spin_unlock_bh(&call->peer->lock);
-       }
-
        /* Each transmission of a Tx packet needs a new serial number */
        serial = atomic_inc_return(&conn->serial);
        txb->wire.serial = htonl(serial);
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 94f63fb..97d017c 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -207,11 +207,24 @@ static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
                                   enum rxrpc_call_completion compl)
 {
        struct rxrpc_call *call;
+       HLIST_HEAD(error_targets);
+
+       spin_lock(&peer->lock);
+       hlist_move_list(&peer->error_targets, &error_targets);
+
+       while (!hlist_empty(&error_targets)) {
+               call = hlist_entry(error_targets.first,
+                                  struct rxrpc_call, error_link);
+               hlist_del_init(&call->error_link);
+               spin_unlock(&peer->lock);
 
-       hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
                rxrpc_see_call(call, rxrpc_call_see_distribute_error);
                rxrpc_set_call_completion(call, compl, 0, -error);
+
+               spin_lock(&peer->lock);
        }
+
+       spin_unlock(&peer->lock);
 }
 
 /*
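
The rewritten distributor drains the list instead of iterating it in
place: hlist_move_list() steals every entry onto an on-stack list head in
O(1), each call is then popped with hlist_del_init(), and peer->lock is
dropped around the per-call work, which is therefore now free to sleep.
Any call added to peer->error_targets mid-drain lands on the now-empty
peer list and is picked up by a later distribution pass.  The lock is
retaken for every pop because error_link remains shared state:
rxrpc_disconnect_call() (conn_object.c above) may hlist_del_init() a
call's link under peer->lock even after the node has moved onto the
local list.  A condensed sketch of the pattern, with the hypothetical
do_sleeping_work() standing in for the completion calls:

	static void drain_error_targets(struct rxrpc_peer *peer)
	{
		struct rxrpc_call *call;
		HLIST_HEAD(targets);	/* on-stack list head */

		spin_lock(&peer->lock);
		hlist_move_list(&peer->error_targets, &targets);

		while (!hlist_empty(&targets)) {
			call = hlist_entry(targets.first,
					   struct rxrpc_call, error_link);
			hlist_del_init(&call->error_link);
			spin_unlock(&peer->lock);

			do_sleeping_work(call);	/* no lock held: may sleep */

			spin_lock(&peer->lock);
		}
		spin_unlock(&peer->lock);
	}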