staging: et131x: Simplify unlocking tcb_send_qlock in et131x_tx_timeout()

The tcb_send_qlock spinlock is unlocked in all three paths at the end of
et131x_tx_timeout(). We can instead unlock it once before entering any of the
paths, saving ourselves a few lines of code.

This change puts tcb->count++ outside of the lock, but et131x_tx_timeout()
itself is protected by the tx_global_lock, so this shouldn't matter.

Signed-off-by: Mark Einon <mark.einon@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Mark Einon 2014-09-14 16:58:59 +01:00 committed by Greg Kroah-Hartman
parent 0b06912b84
commit 82d95799d8
1 changed file with 2 additions and 8 deletions

View File

@@ -4170,16 +4170,13 @@ static void et131x_tx_timeout(struct net_device *netdev)
/* Is send stuck? */
spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
tcb = tx_ring->send_head;
spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
if (tcb != NULL) {
if (tcb) {
tcb->count++;
if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
spin_unlock_irqrestore(&adapter->tcb_send_qlock,
flags);
dev_warn(&adapter->pdev->dev,
"Send stuck - reset. tcb->WrIndex %x\n",
tcb->index);
@@ -4189,11 +4186,8 @@ static void et131x_tx_timeout(struct net_device *netdev)
/* perform reset of tx/rx */
et131x_disable_txrx(netdev);
et131x_enable_txrx(netdev);
return;
}
}
spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}
/* et131x_change_mtu - The handler called to change the MTU for the device */