From 876dbadd53a7102e2a84afc84ea2bd3ee6dc5636 Mon Sep 17 00:00:00 2001
From: Doug Berger
Date: Fri, 14 Jul 2017 16:12:09 -0700
Subject: [PATCH 1/2] net: bcmgenet: Fix unmapping of fragments in
 bcmgenet_xmit()

In case we fail to map a single fragment, we would be leaving the
transmit ring populated with stale entries.

This commit introduces the helper function bcmgenet_put_txcb() which
takes care of rewinding the per-ring write pointer back to where we
left. It also consolidates the functionality of bcmgenet_xmit_single()
and bcmgenet_xmit_frag() into the bcmgenet_xmit() function to make the
unmapping of control blocks cleaner.

Fixes: 1c1008c793fa ("net: bcmgenet: add main driver file")
Suggested-by: Florian Fainelli
Signed-off-by: Doug Berger
Signed-off-by: David S. Miller
---
 .../net/ethernet/broadcom/genet/bcmgenet.c | 195 ++++++++----------
 1 file changed, 87 insertions(+), 108 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index daca1c9d254b..20021525f795 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1202,6 +1202,23 @@ static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
 	return tx_cb_ptr;
 }
 
+static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
+					 struct bcmgenet_tx_ring *ring)
+{
+	struct enet_cb *tx_cb_ptr;
+
+	tx_cb_ptr = ring->cbs;
+	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
+
+	/* Rewinding local write pointer */
+	if (ring->write_ptr == ring->cb_ptr)
+		ring->write_ptr = ring->end_ptr;
+	else
+		ring->write_ptr--;
+
+	return tx_cb_ptr;
+}
+
 /* Simple helper to free a control block's resources */
 static void bcmgenet_free_cb(struct enet_cb *cb)
 {
@@ -1380,95 +1397,6 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev)
 	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
 }
 
-/* Transmits a single SKB (either head of a fragment or a single SKB)
- * caller must hold priv->lock
- */
-static int bcmgenet_xmit_single(struct net_device *dev,
-				struct sk_buff *skb,
-				u16 dma_desc_flags,
-				struct bcmgenet_tx_ring *ring)
-{
-	struct bcmgenet_priv *priv = netdev_priv(dev);
-	struct device *kdev = &priv->pdev->dev;
-	struct enet_cb *tx_cb_ptr;
-	unsigned int skb_len;
-	dma_addr_t mapping;
-	u32 length_status;
-	int ret;
-
-	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
-
-	if (unlikely(!tx_cb_ptr))
-		BUG();
-
-	tx_cb_ptr->skb = skb;
-
-	skb_len = skb_headlen(skb);
-
-	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
-	ret = dma_mapping_error(kdev, mapping);
-	if (ret) {
-		priv->mib.tx_dma_failed++;
-		netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
-		dev_kfree_skb(skb);
-		return ret;
-	}
-
-	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
-	dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
-	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
-			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
-			DMA_TX_APPEND_CRC;
-
-	if (skb->ip_summed == CHECKSUM_PARTIAL)
-		length_status |= DMA_TX_DO_CSUM;
-
-	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);
-
-	return 0;
-}
-
-/* Transmit a SKB fragment */
-static int bcmgenet_xmit_frag(struct net_device *dev,
-			      skb_frag_t *frag,
-			      u16 dma_desc_flags,
-			      struct bcmgenet_tx_ring *ring)
-{
-	struct bcmgenet_priv *priv = netdev_priv(dev);
-	struct device *kdev = &priv->pdev->dev;
-	struct enet_cb *tx_cb_ptr;
-	unsigned int frag_size;
-	dma_addr_t mapping;
-	int ret;
-
-	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
-
-	if (unlikely(!tx_cb_ptr))
-		BUG();
-
-	tx_cb_ptr->skb = NULL;
-
-	frag_size = skb_frag_size(frag);
-
-	mapping = skb_frag_dma_map(kdev, frag, 0, frag_size, DMA_TO_DEVICE);
-	ret = dma_mapping_error(kdev, mapping);
-	if (ret) {
-		priv->mib.tx_dma_failed++;
-		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
-			  __func__);
-		return ret;
-	}
-
-	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
-	dma_unmap_len_set(tx_cb_ptr, dma_len, frag_size);
-
-	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
-		    (frag_size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
-		    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
-
-	return 0;
-}
-
 /* Reallocate the SKB to put enough headroom in front of it and insert
  * the transmit checksum offsets in the descriptors
  */
@@ -1535,11 +1463,16 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
 static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct device *kdev = &priv->pdev->dev;
 	struct bcmgenet_tx_ring *ring = NULL;
+	struct enet_cb *tx_cb_ptr;
 	struct netdev_queue *txq;
 	unsigned long flags = 0;
 	int nr_frags, index;
-	u16 dma_desc_flags;
+	dma_addr_t mapping;
+	unsigned int size;
+	skb_frag_t *frag;
+	u32 len_stat;
 	int ret;
 	int i;
 
@@ -1592,27 +1525,49 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 
-	dma_desc_flags = DMA_SOP;
-	if (nr_frags == 0)
-		dma_desc_flags |= DMA_EOP;
+	for (i = 0; i <= nr_frags; i++) {
+		tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
 
-	/* Transmit single SKB or head of fragment list */
-	ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
-	if (ret) {
-		ret = NETDEV_TX_OK;
-		goto out;
-	}
+		if (unlikely(!tx_cb_ptr))
+			BUG();
 
-	/* xmit fragment */
-	for (i = 0; i < nr_frags; i++) {
-		ret = bcmgenet_xmit_frag(dev,
-					 &skb_shinfo(skb)->frags[i],
-					 (i == nr_frags - 1) ? DMA_EOP : 0,
-					 ring);
-		if (ret) {
-			ret = NETDEV_TX_OK;
-			goto out;
+		if (!i) {
+			/* Transmit single SKB or head of fragment list */
+			tx_cb_ptr->skb = skb;
+			size = skb_headlen(skb);
+			mapping = dma_map_single(kdev, skb->data, size,
+						 DMA_TO_DEVICE);
+		} else {
+			/* xmit fragment */
+			tx_cb_ptr->skb = NULL;
+			frag = &skb_shinfo(skb)->frags[i - 1];
+			size = skb_frag_size(frag);
+			mapping = skb_frag_dma_map(kdev, frag, 0, size,
+						   DMA_TO_DEVICE);
 		}
+
+		ret = dma_mapping_error(kdev, mapping);
+		if (ret) {
+			priv->mib.tx_dma_failed++;
+			netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
+			ret = NETDEV_TX_OK;
+			goto out_unmap_frags;
+		}
+		dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
+		dma_unmap_len_set(tx_cb_ptr, dma_len, size);
+
+		len_stat = (size << DMA_BUFLENGTH_SHIFT) |
+			   (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
+
+		if (!i) {
+			len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
+			if (skb->ip_summed == CHECKSUM_PARTIAL)
+				len_stat |= DMA_TX_DO_CSUM;
+		}
+		if (i == nr_frags)
+			len_stat |= DMA_EOP;
+
+		dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
 	}
 
 	skb_tx_timestamp(skb);
@@ -1635,6 +1590,30 @@ out:
 	spin_unlock_irqrestore(&ring->lock, flags);
 
 	return ret;
+
+out_unmap_frags:
+	/* Back up for failed control block mapping */
+	bcmgenet_put_txcb(priv, ring);
+
+	/* Unmap successfully mapped control blocks */
+	while (i-- > 0) {
+		tx_cb_ptr = bcmgenet_put_txcb(priv, ring);
+		if (tx_cb_ptr->skb)
+			dma_unmap_single(kdev,
+					 dma_unmap_addr(tx_cb_ptr, dma_addr),
+					 dma_unmap_len(tx_cb_ptr, dma_len),
+					 DMA_TO_DEVICE);
+		else
+			dma_unmap_page(kdev,
+				       dma_unmap_addr(tx_cb_ptr, dma_addr),
+				       dma_unmap_len(tx_cb_ptr, dma_len),
+				       DMA_TO_DEVICE);
+		dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
+		tx_cb_ptr->skb = NULL;
+	}
+
+	dev_kfree_skb(skb);
+	goto out;
 }
 
 static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,

From f48bed16a756f5bc0244acd581f61968f7d7c2a4 Mon Sep 17 00:00:00 2001
From: Doug Berger
Date: Fri, 14 Jul 2017 16:12:10 -0700
Subject: [PATCH 2/2] net: bcmgenet: Free skb after last Tx frag

Since the skb is attached to the first control block of a fragmented
skb it is possible that the skb could be freed when reclaiming that
control block before all fragments of the skb have been consumed by
the hardware and unmapped.

This commit introduces first_cb and last_cb pointers to the skb
control block used by the driver to keep track of which transmit
control blocks within a transmit ring are the first and last ones
associated with the skb.

It then splits the bcmgenet_free_cb() function into transmit
(bcmgenet_free_tx_cb) and receive (bcmgenet_free_rx_cb) versions that
can handle the unmapping of dma mapped memory and cleaning up the
corresponding control block structure so that the skb is only freed
after the last associated transmit control block is reclaimed.

Fixes: 1c1008c793fa ("net: bcmgenet: add main driver file")
Signed-off-by: Doug Berger
Signed-off-by: David S. Miller
---
 .../net/ethernet/broadcom/genet/bcmgenet.c | 144 ++++++++++--------
 .../net/ethernet/broadcom/genet/bcmgenet.h |   2 +
 2 files changed, 85 insertions(+), 61 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 20021525f795..7b0b399aaedd 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1219,14 +1219,6 @@ static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
 	return tx_cb_ptr;
 }
 
-/* Simple helper to free a control block's resources */
-static void bcmgenet_free_cb(struct enet_cb *cb)
-{
-	dev_kfree_skb_any(cb->skb);
-	cb->skb = NULL;
-	dma_unmap_addr_set(cb, dma_addr, 0);
-}
-
 static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
 {
 	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
@@ -1277,18 +1269,72 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
 				 INTRL2_CPU_MASK_SET);
 }
 
+/* Simple helper to free a transmit control block's resources
+ * Returns an skb when the last transmit control block associated with the
+ * skb is freed.  The skb should be freed by the caller if necessary.
+ */
+static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
+					   struct enet_cb *cb)
+{
+	struct sk_buff *skb;
+
+	skb = cb->skb;
+
+	if (skb) {
+		cb->skb = NULL;
+		if (cb == GENET_CB(skb)->first_cb)
+			dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
+					 dma_unmap_len(cb, dma_len),
+					 DMA_TO_DEVICE);
+		else
+			dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr),
+				       dma_unmap_len(cb, dma_len),
+				       DMA_TO_DEVICE);
+		dma_unmap_addr_set(cb, dma_addr, 0);
+
+		if (cb == GENET_CB(skb)->last_cb)
+			return skb;
+
+	} else if (dma_unmap_addr(cb, dma_addr)) {
+		dma_unmap_page(dev,
+			       dma_unmap_addr(cb, dma_addr),
+			       dma_unmap_len(cb, dma_len),
+			       DMA_TO_DEVICE);
+		dma_unmap_addr_set(cb, dma_addr, 0);
+	}
+
+	return 0;
+}
+
+/* Simple helper to free a receive control block's resources */
+static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
+					   struct enet_cb *cb)
+{
+	struct sk_buff *skb;
+
+	skb = cb->skb;
+	cb->skb = NULL;
+
+	if (dma_unmap_addr(cb, dma_addr)) {
+		dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
+				 dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE);
+		dma_unmap_addr_set(cb, dma_addr, 0);
+	}
+
+	return skb;
+}
+
 /* Unlocked version of the reclaim routine */
 static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
 					  struct bcmgenet_tx_ring *ring)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
-	struct device *kdev = &priv->pdev->dev;
-	struct enet_cb *tx_cb_ptr;
-	unsigned int pkts_compl = 0;
-	unsigned int bytes_compl = 0;
-	unsigned int c_index;
-	unsigned int txbds_ready;
 	unsigned int txbds_processed = 0;
+	unsigned int bytes_compl = 0;
+	unsigned int pkts_compl = 0;
+	unsigned int txbds_ready;
+	unsigned int c_index;
+	struct sk_buff *skb;
 
 	/* Clear status before servicing to reduce spurious interrupts */
 	if (ring->index == DESC_INDEX)
@@ -1309,21 +1355,12 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
 
 	/* Reclaim transmitted buffers */
 	while (txbds_processed < txbds_ready) {
-		tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
-		if (tx_cb_ptr->skb) {
+		skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
+					  &priv->tx_cbs[ring->clean_ptr]);
+		if (skb) {
 			pkts_compl++;
-			bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
-			dma_unmap_single(kdev,
-					 dma_unmap_addr(tx_cb_ptr, dma_addr),
-					 dma_unmap_len(tx_cb_ptr, dma_len),
-					 DMA_TO_DEVICE);
-			bcmgenet_free_cb(tx_cb_ptr);
-		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
-			dma_unmap_page(kdev,
-				       dma_unmap_addr(tx_cb_ptr, dma_addr),
-				       dma_unmap_len(tx_cb_ptr, dma_len),
-				       DMA_TO_DEVICE);
-			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
+			bytes_compl += GENET_CB(skb)->bytes_sent;
+			dev_kfree_skb_any(skb);
 		}
 
 		txbds_processed++;
@@ -1533,13 +1570,12 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		if (!i) {
 			/* Transmit single SKB or head of fragment list */
-			tx_cb_ptr->skb = skb;
+			GENET_CB(skb)->first_cb = tx_cb_ptr;
 			size = skb_headlen(skb);
 			mapping = dma_map_single(kdev, skb->data, size,
 						 DMA_TO_DEVICE);
 		} else {
 			/* xmit fragment */
-			tx_cb_ptr->skb = NULL;
 			frag = &skb_shinfo(skb)->frags[i - 1];
 			size = skb_frag_size(frag);
 			mapping = skb_frag_dma_map(kdev, frag, 0, size,
@@ -1556,6 +1592,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 		dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
 		dma_unmap_len_set(tx_cb_ptr, dma_len, size);
 
+		tx_cb_ptr->skb = skb;
+
 		len_stat = (size << DMA_BUFLENGTH_SHIFT) |
 			   (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
 
@@ -1570,6 +1608,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 		dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
 	}
 
+	GENET_CB(skb)->last_cb = tx_cb_ptr;
 	skb_tx_timestamp(skb);
 
 	/* Decrement total BD count and advance our write pointer */
@@ -1598,18 +1637,7 @@ out_unmap_frags:
 	/* Unmap successfully mapped control blocks */
 	while (i-- > 0) {
 		tx_cb_ptr = bcmgenet_put_txcb(priv, ring);
-		if (tx_cb_ptr->skb)
-			dma_unmap_single(kdev,
-					 dma_unmap_addr(tx_cb_ptr, dma_addr),
-					 dma_unmap_len(tx_cb_ptr, dma_len),
-					 DMA_TO_DEVICE);
-		else
-			dma_unmap_page(kdev,
-				       dma_unmap_addr(tx_cb_ptr, dma_addr),
-				       dma_unmap_len(tx_cb_ptr, dma_len),
-				       DMA_TO_DEVICE);
-		dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
-		tx_cb_ptr->skb = NULL;
+		bcmgenet_free_tx_cb(kdev, tx_cb_ptr);
 	}
 
 	dev_kfree_skb(skb);
@@ -1645,14 +1673,12 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
 	}
 
 	/* Grab the current Rx skb from the ring and DMA-unmap it */
-	rx_skb = cb->skb;
-	if (likely(rx_skb))
-		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
-				 priv->rx_buf_len, DMA_FROM_DEVICE);
+	rx_skb = bcmgenet_free_rx_cb(kdev, cb);
 
 	/* Put the new Rx skb on the ring */
 	cb->skb = skb;
 	dma_unmap_addr_set(cb, dma_addr, mapping);
+	dma_unmap_len_set(cb, dma_len, priv->rx_buf_len);
 	dmadesc_set_addr(priv, cb->bd_addr, mapping);
 
 	/* Return the current Rx skb to caller */
@@ -1859,22 +1885,16 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
 
 static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
 {
-	struct device *kdev = &priv->pdev->dev;
+	struct sk_buff *skb;
 	struct enet_cb *cb;
 	int i;
 
 	for (i = 0; i < priv->num_rx_bds; i++) {
 		cb = &priv->rx_cbs[i];
 
-		if (dma_unmap_addr(cb, dma_addr)) {
-			dma_unmap_single(kdev,
-					 dma_unmap_addr(cb, dma_addr),
-					 priv->rx_buf_len, DMA_FROM_DEVICE);
-			dma_unmap_addr_set(cb, dma_addr, 0);
-		}
-
-		if (cb->skb)
-			bcmgenet_free_cb(cb);
+		skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
+		if (skb)
+			dev_kfree_skb_any(skb);
 	}
 }
 
@@ -2458,8 +2478,10 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
 
 static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 {
-	int i;
 	struct netdev_queue *txq;
+	struct sk_buff *skb;
+	struct enet_cb *cb;
+	int i;
 
 	bcmgenet_fini_rx_napi(priv);
 	bcmgenet_fini_tx_napi(priv);
@@ -2468,10 +2490,10 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 	bcmgenet_dma_teardown(priv);
 
 	for (i = 0; i < priv->num_tx_bds; i++) {
-		if (priv->tx_cbs[i].skb != NULL) {
-			dev_kfree_skb(priv->tx_cbs[i].skb);
-			priv->tx_cbs[i].skb = NULL;
-		}
+		cb = priv->tx_cbs + i;
+		skb = bcmgenet_free_tx_cb(&priv->pdev->dev, cb);
+		if (skb)
+			dev_kfree_skb(skb);
 	}
 
 	for (i = 0; i < priv->hw_params->tx_queues; i++) {
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index efd07020b89f..b9344de669f8 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -544,6 +544,8 @@ struct bcmgenet_hw_params {
 };
 
 struct bcmgenet_skb_cb {
+	struct enet_cb *first_cb;	/* First control block of SKB */
+	struct enet_cb *last_cb;	/* Last control block of SKB */
 	unsigned int bytes_sent;	/* bytes on the wire (no TSB) */
 };
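Editor's note: the sketch below is not part of either patch. It is a minimal,
stand-alone user-space model of the claim/rewind bookkeeping that
bcmgenet_get_txcb()/bcmgenet_put_txcb() perform on the per-ring write pointer
in patch 1's error path. The struct, function names, and ring indices
(toy_ring, toy_get_txcb, toy_put_txcb, descriptors 16..31) are invented for
illustration and are not the driver's own definitions.

/* Stand-alone illustration (user-space C) of the write-pointer rewind that
 * bcmgenet_put_txcb() introduces in patch 1: when a DMA mapping fails
 * part-way through a fragmented skb, the transmit path walks the per-ring
 * write pointer backwards over the control blocks it already claimed,
 * wrapping from cb_ptr back to end_ptr.  Names and indices here are toys.
 */
#include <assert.h>
#include <stdio.h>

struct toy_ring {
	unsigned int cb_ptr;	/* first descriptor index owned by the ring */
	unsigned int end_ptr;	/* last descriptor index owned by the ring */
	unsigned int write_ptr;	/* next descriptor to be claimed */
};

/* Claim the next control block: return write_ptr, then advance with wrap */
static unsigned int toy_get_txcb(struct toy_ring *ring)
{
	unsigned int cb = ring->write_ptr;

	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return cb;
}

/* Give back the most recently claimed control block (exact mirror of get) */
static unsigned int toy_put_txcb(struct toy_ring *ring)
{
	if (ring->write_ptr == ring->cb_ptr)
		ring->write_ptr = ring->end_ptr;
	else
		ring->write_ptr--;

	return ring->write_ptr;
}

int main(void)
{
	struct toy_ring ring = { .cb_ptr = 16, .end_ptr = 31, .write_ptr = 30 };
	unsigned int claimed[3];
	unsigned int cb;
	int i;

	/* Claim head + two fragments; the second claim wraps past end_ptr */
	for (i = 0; i < 3; i++)
		claimed[i] = toy_get_txcb(&ring);

	/* Pretend the last mapping failed: hand the blocks back in reverse */
	while (i-- > 0) {
		cb = toy_put_txcb(&ring);
		assert(cb == claimed[i]);	/* unmap claimed[i] here */
	}

	assert(ring.write_ptr == 30);	/* ring looks untouched again */
	printf("rewind restored write_ptr to %u\n", ring.write_ptr);
	return 0;
}

The unwind loop in main() mirrors the shape of the real out_unmap_frags:
path: each successfully claimed descriptor is handed back in reverse order,
so every mapped buffer can be unmapped before the skb itself is freed exactly
once, which is also what patch 2's first_cb/last_cb bookkeeping relies on.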