net: calxedaxgmac: rework transmit ring handling

Only generate tx interrupts on every ring size / 4 descriptors. Move the
netif_stop_queue call to the end of the xmit function rather than
checking at the beginning.

Signed-off-by: Rob Herring <rob.herring@calxeda.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Rob Herring 2012-11-05 06:22:23 +00:00 committed by David S. Miller
parent 9169963d80
commit 97a3a9a67b
1 changed file with 12 additions and 12 deletions

View File

@@ -211,7 +211,7 @@
#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */ #define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \ #define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
DMA_INTR_ENA_TUE) DMA_INTR_ENA_TUE | DMA_INTR_ENA_TIE)
#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \ #define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \ DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
@@ -374,6 +374,7 @@ struct xgmac_priv {
struct sk_buff **tx_skbuff; struct sk_buff **tx_skbuff;
unsigned int tx_head; unsigned int tx_head;
unsigned int tx_tail; unsigned int tx_tail;
int tx_irq_cnt;
void __iomem *base; void __iomem *base;
unsigned int dma_buf_sz; unsigned int dma_buf_sz;
@@ -886,7 +887,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
} }
if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) > if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
TX_THRESH) MAX_SKB_FRAGS)
netif_wake_queue(priv->dev); netif_wake_queue(priv->dev);
} }
@@ -1057,19 +1058,15 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
struct xgmac_priv *priv = netdev_priv(dev); struct xgmac_priv *priv = netdev_priv(dev);
unsigned int entry; unsigned int entry;
int i; int i;
u32 irq_flag;
int nfrags = skb_shinfo(skb)->nr_frags; int nfrags = skb_shinfo(skb)->nr_frags;
struct xgmac_dma_desc *desc, *first; struct xgmac_dma_desc *desc, *first;
unsigned int desc_flags; unsigned int desc_flags;
unsigned int len; unsigned int len;
dma_addr_t paddr; dma_addr_t paddr;
if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) < priv->tx_irq_cnt = (priv->tx_irq_cnt + 1) & (DMA_TX_RING_SZ/4 - 1);
(nfrags + 1)) { irq_flag = priv->tx_irq_cnt ? 0 : TXDESC_INTERRUPT;
writel(DMA_INTR_DEFAULT_MASK | DMA_INTR_ENA_TIE,
priv->base + XGMAC_DMA_INTR_ENA);
netif_stop_queue(dev);
return NETDEV_TX_BUSY;
}
desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ? desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
TXDESC_CSUM_ALL : 0; TXDESC_CSUM_ALL : 0;
@@ -1110,9 +1107,9 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
/* Interrupt on completition only for the latest segment */ /* Interrupt on completition only for the latest segment */
if (desc != first) if (desc != first)
desc_set_tx_owner(desc, desc_flags | desc_set_tx_owner(desc, desc_flags |
TXDESC_LAST_SEG | TXDESC_INTERRUPT); TXDESC_LAST_SEG | irq_flag);
else else
desc_flags |= TXDESC_LAST_SEG | TXDESC_INTERRUPT; desc_flags |= TXDESC_LAST_SEG | irq_flag;
/* Set owner on first desc last to avoid race condition */ /* Set owner on first desc last to avoid race condition */
wmb(); wmb();
@@ -1121,6 +1118,9 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ); priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
writel(1, priv->base + XGMAC_DMA_TX_POLL); writel(1, priv->base + XGMAC_DMA_TX_POLL);
if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
MAX_SKB_FRAGS)
netif_stop_queue(dev);
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
@@ -1397,7 +1397,7 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
} }
/* TX/RX NORMAL interrupts */ /* TX/RX NORMAL interrupts */
if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU)) { if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU | DMA_STATUS_TI)) {
__raw_writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA); __raw_writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
napi_schedule(&priv->napi); napi_schedule(&priv->napi);
} }