[TG3]: Minimize locking in TX path.

This is similar to Eric Dumazet's tx_lock patch for tg3, but takes it
one step further: it eliminates the tx_lock in the tx completion path
when the tx queue is not stopped.

Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 51b9146869
parent 86d9f7f0c9
Author: Michael Chan
Date:   2005-09-01 17:41:28 -07:00
Committed by: David S. Miller
1 changed file with 13 additions and 11 deletions

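Before the hunks, the shape of the scheme this patch moves to, condensed from the diff below into one sketch (kernel-style, not compilable on its own). The completion path runs lock-free in the common case; tx_lock is taken only on the slow paths, and each side re-checks the other's state once the lock (or the stop) is in place. The xmit path already runs under tx_lock courtesy of the parent tx_lock patch.

    /* Completion path (tg3_tx, serialized by the NAPI poll loop): */
    tp->tx_cons = sw_idx;                       /* no lock in the common case */
    if (unlikely(netif_queue_stopped(tp->dev))) {
            spin_lock(&tp->tx_lock);            /* slow path: queue was stopped */
            /* re-check stopped + space under the lock, then wake */
            spin_unlock(&tp->tx_lock);
    }

    /* Xmit path (tg3_start_xmit, holding tx_lock for the whole function): */
    tp->tx_prod = entry;
    if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
            netif_stop_queue(dev);
            /* re-check space: completion may have just reaped, and it
             * will not wake a queue it never saw stopped */
    }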

@@ -121,12 +121,9 @@
 					  TG3_RX_RCB_RING_SIZE(tp))
 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
 				 TG3_TX_RING_SIZE)
-#define TX_RING_GAP(TP)	\
-	(TG3_TX_RING_SIZE - (TP)->tx_pending)
 #define TX_BUFFS_AVAIL(TP)						\
-	(((TP)->tx_cons <= (TP)->tx_prod) ?				\
-	  (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :		\
-	  (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
+	((TP)->tx_pending -						\
+	 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
 
 #define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
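The rewritten TX_BUFFS_AVAIL() collapses the old three-way ternary (and the TX_RING_GAP() helper) into a single expression: because TG3_TX_RING_SIZE is a power of two, ((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1) is the in-flight descriptor count even when the producer index has wrapped past the end of the ring. A standalone check of the arithmetic (the ring size and pending count here are demo values, not tg3's configuration):

    #include <stdio.h>

    #define RING_SIZE  512          /* power of two, standing in for TG3_TX_RING_SIZE */
    #define TX_PENDING 100          /* standing in for tp->tx_pending */

    /* Mirror of the new macro: free descriptors = budget minus in-flight. */
    static unsigned int tx_buffs_avail(unsigned int prod, unsigned int cons)
    {
            return TX_PENDING - ((prod - cons) & (RING_SIZE - 1));
    }

    int main(void)
    {
            /* No wrap: prod 10 ahead of cons -> 90 free. */
            printf("%u\n", tx_buffs_avail(20, 10));
            /* Wrapped: prod restarted at 5, cons still at 507; unsigned
             * subtraction plus the mask give (5 - 507) & 511 = 10 in
             * flight, so again 90 free. */
            printf("%u\n", tx_buffs_avail(5, 507));
            return 0;
    }

Both calls print 90; the old ternary needed separate branches for exactly this wraparound case.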
@@ -2880,9 +2877,13 @@ static void tg3_tx(struct tg3 *tp)
 	tp->tx_cons = sw_idx;
 
-	if (netif_queue_stopped(tp->dev) &&
-	    (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
-		netif_wake_queue(tp->dev);
+	if (unlikely(netif_queue_stopped(tp->dev))) {
+		spin_lock(&tp->tx_lock);
+		if (netif_queue_stopped(tp->dev) &&
+		    (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
+			netif_wake_queue(tp->dev);
+		spin_unlock(&tp->tx_lock);
+	}
 }
 
 /* Returns size of skb allocated or < 0 on error.
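The unlocked netif_queue_stopped() test above is only a hint; the binding decision is re-made under tx_lock. Without the lock, the wake test here and the stop sequence in tg3_start_xmit() could each see the other's state stale, and both sides would decline to wake. An illustrative interleaving (a sketch, not driver code):

    /*
     *   tg3_tx (hypothetically lock-free)   tg3_start_xmit (tx_lock held)
     *   ---------------------------------   -----------------------------
     *   tp->tx_cons = sw_idx
     *     (store not yet visible)
     *   netif_queue_stopped()? reads 0      netif_stop_queue(dev)
     *     -> skips the wake                 TX_BUFFS_AVAIL() reads the
     *                                         old tx_cons -> still low,
     *                                         no self-wake either
     *
     * Queue stopped, all completions reaped: nothing ever wakes it again.
     * Taking tx_lock orders the wake test against xmit's stop+re-check,
     * so whichever side runs second sees the other's update.
     */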
@@ -3198,9 +3199,7 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 	/* run TX completion thread */
 	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
-		spin_lock(&tp->tx_lock);
 		tg3_tx(tp);
-		spin_unlock(&tp->tx_lock);
 	}
 
 	/* run RX thread, within the bounds set by NAPI.
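Dropping tx_lock around tg3_tx() is safe because, after this patch, each ring index has exactly one writer and tg3_tx() itself is serialized by the NAPI poll loop. A sketch of the invariant (the fields are tg3's; the annotations are ours):

    /*
     * tp->tx_prod   written only by tg3_start_xmit(), under tx_lock
     * tp->tx_cons   written only by tg3_tx(), one NAPI poll at a time
     *
     * A reader may see the other side's index slightly stale; in
     * tg3_start_xmit() that makes TX_BUFFS_AVAIL() under-count free
     * space, which errs toward stopping the queue early, never toward
     * overrunning the ring.
     */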
@@ -3716,8 +3715,11 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 
 	tp->tx_prod = entry;
-	if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
+	if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
 		netif_stop_queue(dev);
+		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
+			netif_wake_queue(tp->dev);
+	}
 
 out_unlock:
 	mmiowb();
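The wake added right after netif_stop_queue() is the xmit side of the handshake: if tg3_tx() reaped descriptors after this function sampled TX_BUFFS_AVAIL() but before the queue stopped, tg3_tx() saw an un-stopped queue and skipped its wake, so the producer must rescue itself. The window it closes, as an illustrative trace (not driver code):

    /*
     *   tg3_start_xmit (tx_lock held)     tg3_tx (lock-free fast path)
     *   -----------------------------     ----------------------------
     *   TX_BUFFS_AVAIL() low
     *                                     reaps descriptors, tx_cons jumps
     *                                     netif_queue_stopped()? no
     *                                       -> takes no lock, no wake
     *   netif_stop_queue(dev)
     *   TX_BUFFS_AVAIL() now > THRESH
     *   netif_wake_queue(tp->dev)         <- without this second check the
     *                                        queue would stay stopped for good
     */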