net: stmmac: prepare dma op mode config for multiple queues

This patch prepares the DMA operation mode configuration for multiple queues.
The work consisted of breaking the DMA operation mode configuration function
into RX and TX scope and adapting the configuration mechanism in stmmac_main
accordingly.

Signed-off-by: Joao Pinto <jpinto@synopsys.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 6deee2221e (parent e0304f58e8)
Author: Joao Pinto, 2017-03-15 11:04:45 +0000; committed by David S. Miller
3 files changed, 124 insertions(+), 79 deletions(-)
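In practice the change replaces the single combined callback with per-channel
RX and TX callbacks. A rough before/after sketch of the call shape, distilled
from the hunks below (illustrative only, not a complete driver excerpt):

	/* Before: one op configures TX and RX together, channel 0 only. */
	priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode, rxfifosz);

	/* After: per-channel ops, so each RX/TX queue can be programmed
	 * independently (see stmmac_dma_operation_mode() below).
	 */
	for (chan = 0; chan < rx_channels_count; chan++)
		priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan, rxfifosz);
	for (chan = 0; chan < tx_channels_count; chan++)
		priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);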

diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h

@@ -424,6 +424,9 @@ struct stmmac_dma_ops {
 	 * An invalid value enables the store-and-forward mode */
 	void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode,
 			 int rxfifosz);
+	void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel,
+			    int fifosz);
+	void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel);
 	/* To track extra statistic (if supported) */
 	void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
 				   void __iomem *ioaddr);

diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c

@@ -182,70 +182,26 @@ static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt)
 		writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(i));
 }
 
-static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
-				    int rxmode, u32 channel, int rxfifosz)
+static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
+				       u32 channel, int fifosz)
 {
-	unsigned int rqs = rxfifosz / 256 - 1;
-	u32 mtl_tx_op, mtl_rx_op, mtl_rx_int;
+	unsigned int rqs = fifosz / 256 - 1;
+	u32 mtl_rx_op, mtl_rx_int;
 
-	/* Following code only done for channel 0, other channels not yet
-	 * supported.
-	 */
-	mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
-	if (txmode == SF_DMA_MODE) {
-		pr_debug("GMAC: enable TX store and forward mode\n");
-		/* Transmit COE type 2 cannot be done in cut-through mode. */
-		mtl_tx_op |= MTL_OP_MODE_TSF;
-	} else {
-		pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode);
-		mtl_tx_op &= ~MTL_OP_MODE_TSF;
-		mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
-		/* Set the transmit threshold */
-		if (txmode <= 32)
-			mtl_tx_op |= MTL_OP_MODE_TTC_32;
-		else if (txmode <= 64)
-			mtl_tx_op |= MTL_OP_MODE_TTC_64;
-		else if (txmode <= 96)
-			mtl_tx_op |= MTL_OP_MODE_TTC_96;
-		else if (txmode <= 128)
-			mtl_tx_op |= MTL_OP_MODE_TTC_128;
-		else if (txmode <= 192)
-			mtl_tx_op |= MTL_OP_MODE_TTC_192;
-		else if (txmode <= 256)
-			mtl_tx_op |= MTL_OP_MODE_TTC_256;
-		else if (txmode <= 384)
-			mtl_tx_op |= MTL_OP_MODE_TTC_384;
-		else
-			mtl_tx_op |= MTL_OP_MODE_TTC_512;
-	}
-	/* For an IP with DWC_EQOS_NUM_TXQ == 1, the fields TXQEN and TQS are RO
-	 * with reset values: TXQEN on, TQS == DWC_EQOS_TXFIFO_SIZE.
-	 * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W
-	 * with reset values: TXQEN off, TQS 256 bytes.
-	 *
-	 * Write the bits in both cases, since it will have no effect when RO.
-	 * For DWC_EQOS_NUM_TXQ > 1, the top bits in MTL_OP_MODE_TQS_MASK might
-	 * be RO, however, writing the whole TQS field will result in a value
-	 * equal to DWC_EQOS_TXFIFO_SIZE, just like for DWC_EQOS_NUM_TXQ == 1.
-	 */
-	mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK;
-	writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
-
 	mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
-	if (rxmode == SF_DMA_MODE) {
+	if (mode == SF_DMA_MODE) {
 		pr_debug("GMAC: enable RX store and forward mode\n");
 		mtl_rx_op |= MTL_OP_MODE_RSF;
 	} else {
-		pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode);
+		pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
 		mtl_rx_op &= ~MTL_OP_MODE_RSF;
 		mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
-		if (rxmode <= 32)
+		if (mode <= 32)
 			mtl_rx_op |= MTL_OP_MODE_RTC_32;
-		else if (rxmode <= 64)
+		else if (mode <= 64)
 			mtl_rx_op |= MTL_OP_MODE_RTC_64;
-		else if (rxmode <= 96)
+		else if (mode <= 96)
 			mtl_rx_op |= MTL_OP_MODE_RTC_96;
 		else
 			mtl_rx_op |= MTL_OP_MODE_RTC_128;
@@ -255,7 +211,7 @@ static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
 	mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;
 
 	/* enable flow control only if each channel gets 4 KiB or more FIFO */
-	if (rxfifosz >= 4096) {
+	if (fifosz >= 4096) {
 		unsigned int rfd, rfa;
 
 		mtl_rx_op |= MTL_OP_MODE_EHFC;
@@ -266,7 +222,7 @@ static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
 		 * Set Threshold for Deactivating Flow Control to min 1 frame,
 		 * i.e. 1500 bytes.
 		 */
-		switch (rxfifosz) {
+		switch (fifosz) {
 		case 4096:
 			/* This violates the above formula because of FIFO size
 			 * limit therefore overflow may occur in spite of this.
@@ -306,11 +262,49 @@ static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
 	       ioaddr + MTL_CHAN_INT_CTRL(channel));
 }
 
-static void dwmac4_dma_operation_mode(void __iomem *ioaddr, int txmode,
-				      int rxmode, int rxfifosz)
+static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
+				       u32 channel)
 {
-	/* Only Channel 0 is actually configured and used */
-	dwmac4_dma_chan_op_mode(ioaddr, txmode, rxmode, 0, rxfifosz);
+	u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+
+	if (mode == SF_DMA_MODE) {
+		pr_debug("GMAC: enable TX store and forward mode\n");
+		/* Transmit COE type 2 cannot be done in cut-through mode. */
+		mtl_tx_op |= MTL_OP_MODE_TSF;
+	} else {
+		pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
+		mtl_tx_op &= ~MTL_OP_MODE_TSF;
+		mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
+		/* Set the transmit threshold */
+		if (mode <= 32)
+			mtl_tx_op |= MTL_OP_MODE_TTC_32;
+		else if (mode <= 64)
+			mtl_tx_op |= MTL_OP_MODE_TTC_64;
+		else if (mode <= 96)
+			mtl_tx_op |= MTL_OP_MODE_TTC_96;
+		else if (mode <= 128)
+			mtl_tx_op |= MTL_OP_MODE_TTC_128;
+		else if (mode <= 192)
+			mtl_tx_op |= MTL_OP_MODE_TTC_192;
+		else if (mode <= 256)
+			mtl_tx_op |= MTL_OP_MODE_TTC_256;
+		else if (mode <= 384)
+			mtl_tx_op |= MTL_OP_MODE_TTC_384;
+		else
+			mtl_tx_op |= MTL_OP_MODE_TTC_512;
+	}
+	/* For an IP with DWC_EQOS_NUM_TXQ == 1, the fields TXQEN and TQS are RO
+	 * with reset values: TXQEN on, TQS == DWC_EQOS_TXFIFO_SIZE.
+	 * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W
+	 * with reset values: TXQEN off, TQS 256 bytes.
+	 *
+	 * Write the bits in both cases, since it will have no effect when RO.
+	 * For DWC_EQOS_NUM_TXQ > 1, the top bits in MTL_OP_MODE_TQS_MASK might
+	 * be RO, however, writing the whole TQS field will result in a value
+	 * equal to DWC_EQOS_TXFIFO_SIZE, just like for DWC_EQOS_NUM_TXQ == 1.
+	 */
+	mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK;
+	writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
 }
 
 static void dwmac4_get_hw_feature(void __iomem *ioaddr,
static void dwmac4_get_hw_feature(void __iomem *ioaddr, static void dwmac4_get_hw_feature(void __iomem *ioaddr,
@@ -387,7 +381,8 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
 	.init = dwmac4_dma_init,
 	.axi = dwmac4_dma_axi,
 	.dump_regs = dwmac4_dump_dma_regs,
-	.dma_mode = dwmac4_dma_operation_mode,
+	.dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
+	.dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
 	.enable_dma_irq = dwmac4_enable_dma_irq,
 	.disable_dma_irq = dwmac4_disable_dma_irq,
 	.start_tx = dwmac4_dma_start_tx,
@@ -409,7 +404,8 @@ const struct stmmac_dma_ops dwmac410_dma_ops = {
 	.init = dwmac4_dma_init,
 	.axi = dwmac4_dma_axi,
 	.dump_regs = dwmac4_dump_dma_regs,
-	.dma_mode = dwmac4_dma_operation_mode,
+	.dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
+	.dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
 	.enable_dma_irq = dwmac410_enable_dma_irq,
 	.disable_dma_irq = dwmac4_disable_dma_irq,
 	.start_tx = dwmac4_dma_start_tx,
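As a side note on the RX path above: the RQS field encodes the per-queue RX
FIFO size in 256-byte blocks minus one, and hardware flow control (EHFC) is
only enabled when a queue gets at least 4 KiB of FIFO. A standalone sketch of
that arithmetic (the FIFO sizes here are made-up demo values):

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical per-queue RX FIFO sizes, in bytes. */
		const int sizes[] = { 2048, 4096, 8192 };

		for (int i = 0; i < 3; i++) {
			int fifosz = sizes[i];
			/* Same formula as dwmac4_dma_rx_chan_op_mode(). */
			unsigned int rqs = fifosz / 256 - 1;
			int ehfc = fifosz >= 4096;

			printf("fifosz=%d -> RQS=%u, EHFC=%s\n",
			       fifosz, rqs, ehfc ? "on" : "off");
		}
		return 0;	/* e.g. fifosz=4096 -> RQS=15, EHFC=on */
	}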

diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c

@@ -1285,14 +1285,20 @@ static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
  */
 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 {
+	u32 rx_channels_count = priv->plat->rx_queues_to_use;
+	u32 tx_channels_count = priv->plat->tx_queues_to_use;
 	int rxfifosz = priv->plat->rx_fifo_size;
+	u32 txmode = 0;
+	u32 rxmode = 0;
+	u32 chan = 0;
 
 	if (rxfifosz == 0)
 		rxfifosz = priv->dma_cap.rx_fifo_size;
 
-	if (priv->plat->force_thresh_dma_mode)
-		priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz);
-	else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
+	if (priv->plat->force_thresh_dma_mode) {
+		txmode = tc;
+		rxmode = tc;
+	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
 		/*
 		 * In case of GMAC, SF mode can be enabled
 		 * to perform the TX COE in HW. This depends on:
@@ -1300,12 +1306,26 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 		 * 2) There is no bugged Jumbo frame support
 		 *    that needs to not insert csum in the TDES.
 		 */
-		priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE,
-					rxfifosz);
+		txmode = SF_DMA_MODE;
+		rxmode = SF_DMA_MODE;
 		priv->xstats.threshold = SF_DMA_MODE;
-	} else
-		priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE,
+	} else {
+		txmode = tc;
+		rxmode = SF_DMA_MODE;
+	}
+
+	/* configure all channels */
+	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+		for (chan = 0; chan < rx_channels_count; chan++)
+			priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
+						   rxfifosz);
+
+		for (chan = 0; chan < tx_channels_count; chan++)
+			priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
+	} else {
+		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
 					rxfifosz);
+	}
 }
 
 /**
@@ -1443,6 +1463,34 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
 	netif_wake_queue(priv->dev);
 }
 
+/**
+ *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
+ *  @priv: driver private structure
+ *  @txmode: TX operating mode
+ *  @rxmode: RX operating mode
+ *  @chan: channel index
+ *  Description: it is used to configure the DMA operation mode at runtime,
+ *  in order to program the TX/RX DMA thresholds or Store-And-Forward
+ *  mode.
+ */
+static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
+					  u32 rxmode, u32 chan)
+{
+	int rxfifosz = priv->plat->rx_fifo_size;
+
+	if (rxfifosz == 0)
+		rxfifosz = priv->dma_cap.rx_fifo_size;
+
+	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+		priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
+					   rxfifosz);
+		priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
+	} else {
+		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
+					rxfifosz);
+	}
+}
+
 /**
  * stmmac_dma_interrupt - DMA ISR
  * @priv: driver private structure
@@ -1452,11 +1500,8 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
  */
 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
 {
+	u32 chan = STMMAC_CHAN0;
 	int status;
-	int rxfifosz = priv->plat->rx_fifo_size;
-
-	if (rxfifosz == 0)
-		rxfifosz = priv->dma_cap.rx_fifo_size;
 
 	status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
 	if (likely((status & handle_rx)) || (status & handle_tx)) {
@@ -1471,11 +1516,12 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
 			    (tc <= 256)) {
 				tc += 64;
 				if (priv->plat->force_thresh_dma_mode)
-					priv->hw->dma->dma_mode(priv->ioaddr, tc, tc,
-								rxfifosz);
+					stmmac_set_dma_operation_mode(priv,
+								      tc, tc, chan);
 				else
-					priv->hw->dma->dma_mode(priv->ioaddr, tc,
-								SF_DMA_MODE, rxfifosz);
+					stmmac_set_dma_operation_mode(priv, tc,
+								      SF_DMA_MODE, chan);
 				priv->xstats.threshold = tc;
 			}
 		} else if (unlikely(status == tx_hard_error))
@@ -1749,6 +1795,9 @@ static void stmmac_mtl_configuration(struct stmmac_priv *priv)
 	/* Enable MAC RX Queues */
 	if (rx_queues_count > 1 && priv->hw->mac->rx_queue_enable)
 		stmmac_mac_enable_rx_queues(priv);
+
+	/* Set the HW DMA mode and the COE */
+	stmmac_dma_operation_mode(priv);
 }
 
 /**
@@ -1812,9 +1861,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
 	else
 		stmmac_set_mac(priv->ioaddr, true);
 
-	/* Set the HW DMA mode and the COE */
-	stmmac_dma_operation_mode(priv);
-
 	stmmac_mmc_setup(priv);
 
 	if (init_ptp) {
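Taken together, the rework keeps the existing mode-selection policy and only
fans the result out per channel. A condensed paraphrase of the selection in
stmmac_dma_operation_mode() follows (a sketch for illustration, not driver
code; tc is stmmac's runtime threshold variable):

	/* Paraphrase of the txmode/rxmode selection; illustrative only. */
	static void pick_modes(bool force_thresh, bool force_sf, bool tx_coe,
			       u32 tc, u32 *txmode, u32 *rxmode)
	{
		if (force_thresh) {
			*txmode = tc;		/* threshold mode on both sides */
			*rxmode = tc;
		} else if (force_sf || tx_coe) {
			*txmode = SF_DMA_MODE;	/* SF so HW can insert the csum */
			*rxmode = SF_DMA_MODE;
		} else {
			*txmode = tc;		/* TX threshold, RX store-and-forward */
			*rxmode = SF_DMA_MODE;
		}
	}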