author      David Howells <dhowells@redhat.com>    2022-10-12 15:42:06 +0100
committer   David Howells <dhowells@redhat.com>    2022-12-01 13:36:41 +0000
commit      29fb4ec385f18db98d9188c2173a0b07d2de6917 (patch)
tree        5421332d90da716ec1aaf36574cfe68af7f994e2 /net/rxrpc
parent      cf37b5987508878647161ec3cdba0bb00a1b607a (diff)
download    linux-29fb4ec385f18db98d9188c2173a0b07d2de6917.tar.gz
rxrpc: Remove RCU from peer->error_targets list
Remove the RCU requirements from the peer's list of error targets so that
the error distributor can call sleeping functions.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
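The locking pattern the patch adopts for the error distributor, sketched as a
self-contained user-space analogue (hypothetical names; a pthread mutex stands
in for peer->lock and a plain singly-linked list for the error_targets hlist):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct target {
	struct target *next;
	int id;
};

static pthread_mutex_t peer_lock = PTHREAD_MUTEX_INITIALIZER;
static struct target *error_targets;	/* protected by peer_lock */

/* Stands in for the per-call completion work: now allowed to sleep. */
static void report_error(struct target *t, int error)
{
	usleep(1000);			/* pretend to block */
	printf("target %d: error %d\n", t->id, error);
}

/* Splice the list away under the lock, then drop the lock around each
 * per-entry call so that call is free to sleep. */
static void distribute_error(int error)
{
	struct target *list, *t;

	pthread_mutex_lock(&peer_lock);
	list = error_targets;		/* like hlist_move_list() below */
	error_targets = NULL;

	while (list) {
		t = list;
		list = t->next;
		pthread_mutex_unlock(&peer_lock);

		report_error(t, error);	/* safe to sleep here */
		free(t);

		pthread_mutex_lock(&peer_lock);
	}
	pthread_mutex_unlock(&peer_lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct target *t = calloc(1, sizeof(*t));
		t->id = i;
		pthread_mutex_lock(&peer_lock);
		t->next = error_targets;
		error_targets = t;
		pthread_mutex_unlock(&peer_lock);
	}
	distribute_error(110);
	return 0;
}

Under the old RCU list walk, the distributor sat in a read-side critical
section and could not sleep; with the lock-plus-splice scheme the lock is only
held while the list itself is being manipulated.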
Diffstat (limited to 'net/rxrpc')
-rw-r--r--   net/rxrpc/call_accept.c    6
-rw-r--r--   net/rxrpc/call_object.c    2
-rw-r--r--   net/rxrpc/conn_client.c    4
-rw-r--r--   net/rxrpc/conn_object.c    6
-rw-r--r--   net/rxrpc/output.c         6
-rw-r--r--   net/rxrpc/peer_event.c    15
6 files changed, 28 insertions, 11 deletions
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 8bc327aa2beb9..5f978b0b2404c 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -433,6 +433,12 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
*/
rxrpc_put_call(call, rxrpc_call_put_discard_prealloc);
+ if (hlist_unhashed(&call->error_link)) {
+ spin_lock(&call->peer->lock);
+ hlist_add_head(&call->error_link, &call->peer->error_targets);
+ spin_unlock(&call->peer->lock);
+ }
+
_leave(" = %p{%d}", call, call->debug_id);
return call;
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 96a7edd3a8422..7570b4e67bc59 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -442,7 +442,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
rcu_assign_pointer(conn->channels[chan].call, call);
spin_lock(&conn->peer->lock);
- hlist_add_head_rcu(&call->error_link, &conn->peer->error_targets);
+ hlist_add_head(&call->error_link, &conn->peer->error_targets);
spin_unlock(&conn->peer->lock);
rxrpc_start_call_timer(call);
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index ab3dd22fadc0c..3c7b1bdec0dbc 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -786,6 +786,10 @@ void rxrpc_expose_client_call(struct rxrpc_call *call)
if (chan->call_counter >= INT_MAX)
set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
+
+ spin_lock(&call->peer->lock);
+ hlist_add_head(&call->error_link, &call->peer->error_targets);
+ spin_unlock(&call->peer->lock);
}
}
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index c2e05ea29f122..5a39255ea014d 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -215,9 +215,9 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
call->peer->cong_ssthresh = call->cong_ssthresh;
if (!hlist_unhashed(&call->error_link)) {
- spin_lock_bh(&call->peer->lock);
- hlist_del_rcu(&call->error_link);
- spin_unlock_bh(&call->peer->lock);
+ spin_lock(&call->peer->lock);
+ hlist_del_init(&call->error_link);
+ spin_unlock(&call->peer->lock);
}
if (rxrpc_is_client_call(call))
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index c8147e50060ba..71963b4523bea 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -394,12 +394,6 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
_enter("%x,{%d}", txb->seq, txb->len);
- if (hlist_unhashed(&call->error_link)) {
- spin_lock_bh(&call->peer->lock);
- hlist_add_head_rcu(&call->error_link, &call->peer->error_targets);
- spin_unlock_bh(&call->peer->lock);
- }
-
/* Each transmission of a Tx packet needs a new serial number */
serial = atomic_inc_return(&conn->serial);
txb->wire.serial = htonl(serial);
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 94f63fb1bd672..97d017ca3dc47 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -207,11 +207,24 @@ static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
enum rxrpc_call_completion compl)
{
struct rxrpc_call *call;
+ HLIST_HEAD(error_targets);
+
+ spin_lock(&peer->lock);
+ hlist_move_list(&peer->error_targets, &error_targets);
+
+ while (!hlist_empty(&error_targets)) {
+ call = hlist_entry(error_targets.first,
+ struct rxrpc_call, error_link);
+ hlist_del_init(&call->error_link);
+ spin_unlock(&peer->lock);
- hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
rxrpc_see_call(call, rxrpc_call_see_distribute_error);
rxrpc_set_call_completion(call, compl, 0, -error);
+
+ spin_lock(&peer->lock);
}
+
+ spin_unlock(&peer->lock);
}
/*
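A side effect of switching rxrpc_disconnect_call() from hlist_del_rcu() to
hlist_del_init() is that the node is returned to the unhashed state, so the
hlist_unhashed() checks (as in the rxrpc_new_incoming_call() hunk above) still
report the call as off the list after it has been removed. Roughly as defined
in include/linux/list.h:

static inline int hlist_unhashed(const struct hlist_node *h)
{
	return !h->pprev;	/* true once the node is (re)initialised */
}

static inline void hlist_del_init(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		INIT_HLIST_NODE(n);	/* unlike hlist_del(), no poisoning */
	}
}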