rxrpc: Don't lock call->tx_lock to access call->tx_buffer
call->tx_buffer is now only accessed within the I/O thread (->tx_sendmsg is the way sendmsg passes packets to the I/O thread) so there's no need to lock around it.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
parent f21e93485b
commit b30d61f4b1
@@ -110,12 +110,8 @@ void rxrpc_shrink_call_tx_buffer(struct rxrpc_call *call)
 
 	_enter("%x/%x/%x", call->tx_bottom, call->acks_hard_ack, call->tx_top);
 
-	for (;;) {
-		spin_lock(&call->tx_lock);
-		txb = list_first_entry_or_null(&call->tx_buffer,
-					       struct rxrpc_txbuf, call_link);
-		if (!txb)
-			break;
+	while ((txb = list_first_entry_or_null(&call->tx_buffer,
+					       struct rxrpc_txbuf, call_link))) {
 		hard_ack = smp_load_acquire(&call->acks_hard_ack);
 		if (before(hard_ack, txb->seq))
 			break;
@@ -128,15 +124,11 @@ void rxrpc_shrink_call_tx_buffer(struct rxrpc_call *call)
 
 		trace_rxrpc_txqueue(call, rxrpc_txqueue_dequeue);
 
-		spin_unlock(&call->tx_lock);
-
 		rxrpc_put_txbuf(txb, rxrpc_txbuf_put_rotated);
 		if (after(call->acks_hard_ack, call->tx_bottom + 128))
 			wake = true;
 	}
 
-	spin_unlock(&call->tx_lock);
-
 	if (wake)
 		wake_up(&call->waitq);
 }
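For context, the handoff pattern the commit message relies on can be sketched roughly as follows. This is an illustrative sketch only, not the actual rxrpc code: struct demo_call, demo_queue_for_io_thread() and demo_io_thread_work() are invented names standing in for the real structures and functions. The point is that sendmsg only ever appends to a handoff queue (the analogue of ->tx_sendmsg) under a lock, and the I/O thread alone splices that queue onto its private transmit queue (the analogue of ->tx_buffer) and walks it, so the private queue needs no locking.

/* Illustrative sketch only -- the demo_* names are invented for this
 * example and do not exist in the rxrpc code. */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_txbuf {
	struct list_head	link;
};

struct demo_call {
	spinlock_t		tx_lock;	/* guards tx_sendmsg only */
	struct list_head	tx_sendmsg;	/* filled by sendmsg under tx_lock */
	struct list_head	tx_buffer;	/* touched only by the I/O thread */
};

/* sendmsg side: hand a packet to the I/O thread.  This may run
 * concurrently with the I/O thread, so the handoff queue is locked. */
static void demo_queue_for_io_thread(struct demo_call *call,
				     struct demo_txbuf *txb)
{
	spin_lock(&call->tx_lock);
	list_add_tail(&txb->link, &call->tx_sendmsg);
	spin_unlock(&call->tx_lock);
}

/* I/O thread side: take everything sendmsg queued, then work on
 * tx_buffer without a lock, since only this thread ever touches it. */
static void demo_io_thread_work(struct demo_call *call)
{
	struct demo_txbuf *txb;

	spin_lock(&call->tx_lock);
	list_splice_tail_init(&call->tx_sendmsg, &call->tx_buffer);
	spin_unlock(&call->tx_lock);

	while ((txb = list_first_entry_or_null(&call->tx_buffer,
					       struct demo_txbuf, link))) {
		/* ... transmit txb; the real code keeps it queued until it
		 * has been hard-acked before discarding it ... */
		list_del(&txb->link);
		kfree(txb);
	}
}

That single-consumer ownership is what allows the patch above to drop the spin_lock()/spin_unlock() pairs around call->tx_buffer in rxrpc_shrink_call_tx_buffer().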