rxrpc: Remove RCU from peer->error_targets list
Remove the RCU requirements from the peer's list of error targets so that the error distributor can call sleeping functions.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
commit 29fb4ec385
parent cf37b59875
net/rxrpc/call_accept.c
@@ -433,6 +433,12 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
 	 */
 	rxrpc_put_call(call, rxrpc_call_put_discard_prealloc);
 
+	if (hlist_unhashed(&call->error_link)) {
+		spin_lock(&call->peer->lock);
+		hlist_add_head(&call->error_link, &call->peer->error_targets);
+		spin_unlock(&call->peer->lock);
+	}
+
 	_leave(" = %p{%d}", call, call->debug_id);
 	return call;
 
net/rxrpc/call_object.c
@@ -442,7 +442,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
 	rcu_assign_pointer(conn->channels[chan].call, call);
 
 	spin_lock(&conn->peer->lock);
-	hlist_add_head_rcu(&call->error_link, &conn->peer->error_targets);
+	hlist_add_head(&call->error_link, &conn->peer->error_targets);
 	spin_unlock(&conn->peer->lock);
 
 	rxrpc_start_call_timer(call);
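The hunks above and below replace hlist_add_head_rcu() with plain hlist_add_head(). The functional difference is the publish barrier: the RCU variant makes the node visible to lockless readers with a store-release, which is pointless once every reader of peer->error_targets takes peer->lock. A condensed userspace sketch of the two insertion shapes, modelled on the kernel's hlist definitions; publish() is a stand-in of my own for rcu_assign_pointer(), not a kernel API:

	#include <stddef.h>

	struct hlist_node { struct hlist_node *next, **pprev; };
	struct hlist_head { struct hlist_node *first; };

	/* Stand-in for rcu_assign_pointer(): a store-release that
	 * publishes the fully-initialised node to lockless readers. */
	#define publish(ptr, val) \
		__atomic_store_n(&(ptr), (val), __ATOMIC_RELEASE)

	/* Plain insertion: correct when all readers hold the lock. */
	static void add_head_plain(struct hlist_node *n, struct hlist_head *h)
	{
		struct hlist_node *first = h->first;

		n->next = first;
		if (first)
			first->pprev = &n->next;
		h->first = n;
		n->pprev = &h->first;
	}

	/* RCU-style insertion: the node must be fully set up before
	 * the release store makes it reachable from the list head. */
	static void add_head_rcu_style(struct hlist_node *n, struct hlist_head *h)
	{
		struct hlist_node *first = h->first;

		n->next = first;
		n->pprev = &h->first;
		publish(h->first, n);
		if (first)
			first->pprev = &n->next;
	}

With RCU gone, the plain form under spin_lock(&peer->lock) is both sufficient and cheaper, and deletion no longer needs a grace period.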
net/rxrpc/conn_client.c
@@ -786,6 +786,10 @@ void rxrpc_expose_client_call(struct rxrpc_call *call)
 		if (chan->call_counter >= INT_MAX)
 			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
 		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
+
+		spin_lock(&call->peer->lock);
+		hlist_add_head(&call->error_link, &call->peer->error_targets);
+		spin_unlock(&call->peer->lock);
 	}
 }
 
net/rxrpc/conn_object.c
@@ -215,9 +215,9 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
 	call->peer->cong_ssthresh = call->cong_ssthresh;
 
 	if (!hlist_unhashed(&call->error_link)) {
-		spin_lock_bh(&call->peer->lock);
-		hlist_del_rcu(&call->error_link);
-		spin_unlock_bh(&call->peer->lock);
+		spin_lock(&call->peer->lock);
+		hlist_del_init(&call->error_link);
+		spin_unlock(&call->peer->lock);
 	}
 
 	if (rxrpc_is_client_call(call))
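The switch from hlist_del_rcu() to hlist_del_init() matters beyond dropping RCU. hlist_del_init() re-initialises the node after unlinking, so the hlist_unhashed() checks elsewhere (as in the rxrpc_new_incoming_call() hunk above) see it as free to be added again; hlist_del_rcu() instead poisons the back-pointer, and the unhashed test would never fire again. A condensed, self-contained sketch of those semantics, modelled on <linux/list.h>:

	#include <stddef.h>

	struct hlist_node { struct hlist_node *next, **pprev; };

	/* A node counts as "unhashed" when its back-pointer is NULL. */
	static int hlist_unhashed(const struct hlist_node *h)
	{
		return !h->pprev;
	}

	static void __hlist_del(struct hlist_node *n)
	{
		struct hlist_node *next = n->next;
		struct hlist_node **pprev = n->pprev;

		*pprev = next;
		if (next)
			next->pprev = pprev;
	}

	/* Unlink and re-initialise: hlist_unhashed() is true afterwards,
	 * so the node can safely be re-added to a list later. */
	static void hlist_del_init(struct hlist_node *n)
	{
		if (!hlist_unhashed(n)) {
			__hlist_del(n);
			n->next = NULL;
			n->pprev = NULL;
		}
	}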
net/rxrpc/output.c
@@ -394,12 +394,6 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
 
 	_enter("%x,{%d}", txb->seq, txb->len);
 
-	if (hlist_unhashed(&call->error_link)) {
-		spin_lock_bh(&call->peer->lock);
-		hlist_add_head_rcu(&call->error_link, &call->peer->error_targets);
-		spin_unlock_bh(&call->peer->lock);
-	}
-
 	/* Each transmission of a Tx packet needs a new serial number */
 	serial = atomic_inc_return(&conn->serial);
 	txb->wire.serial = htonl(serial);
net/rxrpc/peer_event.c
@@ -207,11 +207,24 @@ static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
 			       enum rxrpc_call_completion compl)
 {
 	struct rxrpc_call *call;
+	HLIST_HEAD(error_targets);
+
+	spin_lock(&peer->lock);
+	hlist_move_list(&peer->error_targets, &error_targets);
+
+	while (!hlist_empty(&error_targets)) {
+		call = hlist_entry(error_targets.first,
+				   struct rxrpc_call, error_link);
+		hlist_del_init(&call->error_link);
+		spin_unlock(&peer->lock);
 
-	hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
 		rxrpc_see_call(call, rxrpc_call_see_distribute_error);
 		rxrpc_set_call_completion(call, compl, 0, -error);
+
+		spin_lock(&peer->lock);
 	}
+
+	spin_unlock(&peer->lock);
 }
 
 /*
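The reworked rxrpc_distribute_error() above is the point of the patch: the whole list is moved onto a local head with hlist_move_list() under peer->lock, then entries are popped one at a time with the lock dropped around each completion, so the distributor may now call sleeping functions. Below is a minimal userspace sketch of the same drain pattern, assuming a pthread mutex stands in for peer->lock and a hand-rolled singly-linked list for the kernel hlist; fail_call() is a hypothetical stand-in for rxrpc_set_call_completion(), not rxrpc API:

	#include <pthread.h>
	#include <stddef.h>
	#include <stdio.h>

	struct call {
		struct call *next;
		int debug_id;
	};

	static pthread_mutex_t peer_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct call *error_targets;	/* protected by peer_lock */

	/* Stand-in for rxrpc_set_call_completion(): may sleep, so it
	 * must run with peer_lock dropped. */
	static void fail_call(struct call *call, int error)
	{
		printf("failing call %d with error %d\n",
		       call->debug_id, error);
	}

	static void distribute_error(int error)
	{
		struct call *list, *call;

		pthread_mutex_lock(&peer_lock);

		/* Equivalent of hlist_move_list(): steal the whole list
		 * so it no longer has to be walked under RCU. */
		list = error_targets;
		error_targets = NULL;

		while (list) {
			/* Pop one entry while locked - in the kernel the
			 * entry is still reachable via call->error_link,
			 * so the unlink must be serialised against
			 * rxrpc_disconnect_call(). */
			call = list;
			list = call->next;
			call->next = NULL;

			pthread_mutex_unlock(&peer_lock);
			fail_call(call, error);	/* sleeping work, unlocked */
			pthread_mutex_lock(&peer_lock);
		}

		pthread_mutex_unlock(&peer_lock);
	}

	int main(void)
	{
		struct call a = { .next = NULL, .debug_id = 1 };
		struct call b = { .next = &a,   .debug_id = 2 };

		error_targets = &b;
		distribute_error(111 /* e.g. ECONNREFUSED on Linux */);
		return 0;
	}

Dropping and re-taking the lock per entry is what makes sleeping work safe here; stolen entries can no longer be reached through peer->error_targets, so nothing new joins the batch mid-drain.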