From 29fb4ec385f18db98d9188c2173a0b07d2de6917 Mon Sep 17 00:00:00 2001
From: David Howells
Date: Wed, 12 Oct 2022 15:42:06 +0100
Subject: [PATCH] rxrpc: Remove RCU from peer->error_targets list

Remove the RCU requirements from the peer's list of error targets so that
the error distributor can call sleeping functions.

Signed-off-by: David Howells
cc: Marc Dionne
cc: linux-afs@lists.infradead.org
---
 net/rxrpc/call_accept.c |  6 ++++++
 net/rxrpc/call_object.c |  2 +-
 net/rxrpc/conn_client.c |  4 ++++
 net/rxrpc/conn_object.c |  6 +++---
 net/rxrpc/output.c      |  6 ------
 net/rxrpc/peer_event.c  | 15 ++++++++++++++-
 6 files changed, 28 insertions(+), 11 deletions(-)

diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 8bc327a..5f978b0 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -433,6 +433,12 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
 	 */
 	rxrpc_put_call(call, rxrpc_call_put_discard_prealloc);
 
+	if (hlist_unhashed(&call->error_link)) {
+		spin_lock(&call->peer->lock);
+		hlist_add_head(&call->error_link, &call->peer->error_targets);
+		spin_unlock(&call->peer->lock);
+	}
+
 	_leave(" = %p{%d}", call, call->debug_id);
 	return call;
 
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 96a7edd..7570b4e 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -442,7 +442,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
 	rcu_assign_pointer(conn->channels[chan].call, call);
 
 	spin_lock(&conn->peer->lock);
-	hlist_add_head_rcu(&call->error_link, &conn->peer->error_targets);
+	hlist_add_head(&call->error_link, &conn->peer->error_targets);
 	spin_unlock(&conn->peer->lock);
 
 	rxrpc_start_call_timer(call);
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index ab3dd22..3c7b1bd 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -786,6 +786,10 @@ void rxrpc_expose_client_call(struct rxrpc_call *call)
 		if (chan->call_counter >= INT_MAX)
 			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
 		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
+
+		spin_lock(&call->peer->lock);
+		hlist_add_head(&call->error_link, &call->peer->error_targets);
+		spin_unlock(&call->peer->lock);
 	}
 }
 
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index c2e05ea..5a39255 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -215,9 +215,9 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
 	call->peer->cong_ssthresh = call->cong_ssthresh;
 
 	if (!hlist_unhashed(&call->error_link)) {
-		spin_lock_bh(&call->peer->lock);
-		hlist_del_rcu(&call->error_link);
-		spin_unlock_bh(&call->peer->lock);
+		spin_lock(&call->peer->lock);
+		hlist_del_init(&call->error_link);
+		spin_unlock(&call->peer->lock);
 	}
 
 	if (rxrpc_is_client_call(call))
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index c8147e5..71963b4 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -394,12 +394,6 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
 
 	_enter("%x,{%d}", txb->seq, txb->len);
 
-	if (hlist_unhashed(&call->error_link)) {
-		spin_lock_bh(&call->peer->lock);
-		hlist_add_head_rcu(&call->error_link, &call->peer->error_targets);
-		spin_unlock_bh(&call->peer->lock);
-	}
-
 	/* Each transmission of a Tx packet needs a new serial number */
 	serial = atomic_inc_return(&conn->serial);
 	txb->wire.serial = htonl(serial);
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 94f63fb..97d017c 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -207,11 +207,24 @@ static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
 				   enum rxrpc_call_completion compl)
 {
 	struct rxrpc_call *call;
+	HLIST_HEAD(error_targets);
+
+	spin_lock(&peer->lock);
+	hlist_move_list(&peer->error_targets, &error_targets);
+
+	while (!hlist_empty(&error_targets)) {
+		call = hlist_entry(error_targets.first,
+				   struct rxrpc_call, error_link);
+		hlist_del_init(&call->error_link);
+		spin_unlock(&peer->lock);
 
-	hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
 		rxrpc_see_call(call, rxrpc_call_see_distribute_error);
 		rxrpc_set_call_completion(call, compl, 0, -error);
+
+		spin_lock(&peer->lock);
 	}
+
+	spin_unlock(&peer->lock);
 }
 
 /*
-- 
2.7.4
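Appended note: below is a minimal, standalone userspace sketch of the splice-then-walk pattern
that the new rxrpc_distribute_error() adopts: detach the whole list of targets while holding the
lock, then do the work that may sleep without the lock held.  The struct and function names here
(struct target, add_target, complete_target, distribute_error) and the pthread mutex are
illustrative stand-ins only, not rxrpc or kernel APIs.

/*
 * Userspace sketch of the splice-then-walk error distribution pattern.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct target {
	struct target *next;		/* stand-in for the hlist linkage */
	int id;
};

static struct target *error_targets;	/* stand-in for peer->error_targets */
static pthread_mutex_t peer_lock = PTHREAD_MUTEX_INITIALIZER;

/* Register a target for error distribution (cf. hlist_add_head under peer->lock). */
static void add_target(int id)
{
	struct target *t = malloc(sizeof(*t));

	if (!t)
		return;
	t->id = id;
	pthread_mutex_lock(&peer_lock);
	t->next = error_targets;
	error_targets = t;
	pthread_mutex_unlock(&peer_lock);
}

/* Work that may sleep - not permitted inside an RCU read-side section. */
static void complete_target(struct target *t, int error)
{
	usleep(1000);
	printf("completed target %d with error %d\n", t->id, -error);
}

static void distribute_error(int error)
{
	struct target *list, *t;

	/* Detach the whole list under the lock (analogue of hlist_move_list). */
	pthread_mutex_lock(&peer_lock);
	list = error_targets;
	error_targets = NULL;
	pthread_mutex_unlock(&peer_lock);

	/* Walk the detached list with no lock held, so sleeping is fine. */
	while (list) {
		t = list;
		list = t->next;
		complete_target(t, error);
		free(t);
	}
}

int main(void)
{
	for (int id = 0; id < 3; id++)
		add_target(id);
	distribute_error(111);	/* deliver an errno-style error to all targets */
	return 0;
}

One difference worth noting: the kernel loop pops each entry and calls hlist_del_init() with
peer->lock held, dropping the lock only around rxrpc_set_call_completion(), presumably because a
call can still unlink itself from the spliced list through its error_link (see the
rxrpc_disconnect_call() change in conn_object.c).  The sketch omits that step because nothing
else ever touches its detached list.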