Merge branch 'amd-xgbe-next'
Tom Lendacky says:

====================
amd-xgbe: AMD 10Gb Ethernet driver updates

The following series fixes some bugs and provides new/changed support
in the driver.

- Fix a debugfs backward compatibility issue introduced by a previous
  patch

- Write to the interrupt enablement register, not the status register,
  when setting MTL interrupts

- Call netif_napi_del whenever the ndo_stop operation is called (to
  match the call to netif_napi_add on ndo_open)

- Performance enhancements:
  - Adjusted default coalescing settings
  - AXI DMA changes (burst length size and cache settings)
  - ioread/iowrite reduction during interrupt processing
  - Napi poll updates

- AXI DMA settings based on a device tree property to account for a
  change in the ARM64 default cache operations assignment

This patch series is based on net-next.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 52d14c6661
@@ -170,6 +170,8 @@
 #define DMA_MR_SWR_WIDTH		1
 #define DMA_SBMR_EAME_INDEX		11
 #define DMA_SBMR_EAME_WIDTH		1
+#define DMA_SBMR_BLEN_256_INDEX		7
+#define DMA_SBMR_BLEN_256_WIDTH		1
 #define DMA_SBMR_UNDEF_INDEX		0
 #define DMA_SBMR_UNDEF_WIDTH		1

@@ -165,9 +165,9 @@ static ssize_t xgbe_common_write(const char __user *buffer, size_t count,
 		return len;

 	workarea[len] = '\0';
-	ret = kstrtouint(workarea, 0, value);
+	ret = kstrtouint(workarea, 16, value);
 	if (ret)
-		return ret;
+		return -EIO;

 	return len;
 }

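Note on the debugfs fix above: with base 0, kstrtouint() picks the base from the prefix, so a bare value such as "8000" is read as decimal, while the original debugfs interface always treated input as hex. Forcing base 16 restores that behaviour. A small userspace sketch of the same parsing difference (purely illustrative, using strtoul rather than the kernel's kstrtouint):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: shows why base 0 broke hex input that has no "0x"
 * prefix, while base 16 keeps the old debugfs behaviour. */
int main(void)
{
	const char *input = "8000";

	/* base 0: no "0x" prefix, so the value is read as decimal 8000 */
	unsigned long as_base0 = strtoul(input, NULL, 0);

	/* base 16: the value is read as hex, i.e. 0x8000 = 32768 */
	unsigned long as_base16 = strtoul(input, NULL, 16);

	printf("base 0: %lu, base 16: %lu\n", as_base0, as_base16);
	return 0;
}
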
@@ -486,7 +486,7 @@ static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
 		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);

 		/* No MTL interrupts to be enabled */
-		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, 0);
+		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
 	}
 }

@@ -1306,56 +1306,48 @@ static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
 	return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
 }

-static void xgbe_save_interrupt_status(struct xgbe_channel *channel,
-				       enum xgbe_int_state int_state)
-{
-	unsigned int dma_ch_ier;
-
-	if (int_state == XGMAC_INT_STATE_SAVE) {
-		channel->saved_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
-		channel->saved_ier &= XGBE_DMA_INTERRUPT_MASK;
-	} else {
-		dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
-		dma_ch_ier |= channel->saved_ier;
-		XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
-	}
-}
-
 static int xgbe_enable_int(struct xgbe_channel *channel,
 			   enum xgbe_int int_id)
 {
+	unsigned int dma_ch_ier;
+
+	dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+
 	switch (int_id) {
-	case XGMAC_INT_DMA_ISR_DC0IS:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1);
-		break;
 	case XGMAC_INT_DMA_CH_SR_TI:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
 		break;
 	case XGMAC_INT_DMA_CH_SR_TPS:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 1);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1);
 		break;
 	case XGMAC_INT_DMA_CH_SR_TBU:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 1);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1);
 		break;
 	case XGMAC_INT_DMA_CH_SR_RI:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 1);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
 		break;
 	case XGMAC_INT_DMA_CH_SR_RBU:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 1);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
 		break;
 	case XGMAC_INT_DMA_CH_SR_RPS:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 1);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1);
+		break;
+	case XGMAC_INT_DMA_CH_SR_TI_RI:
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
 		break;
 	case XGMAC_INT_DMA_CH_SR_FBE:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 1);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
 		break;
 	case XGMAC_INT_DMA_ALL:
-		xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_RESTORE);
+		dma_ch_ier |= channel->saved_ier;
 		break;
 	default:
 		return -1;
 	}

+	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+
 	return 0;
 }

@@ -1364,42 +1356,44 @@ static int xgbe_disable_int(struct xgbe_channel *channel,
 {
 	unsigned int dma_ch_ier;

+	dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+
 	switch (int_id) {
-	case XGMAC_INT_DMA_ISR_DC0IS:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0);
-		break;
 	case XGMAC_INT_DMA_CH_SR_TI:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
 		break;
 	case XGMAC_INT_DMA_CH_SR_TPS:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 0);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0);
 		break;
 	case XGMAC_INT_DMA_CH_SR_TBU:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 0);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0);
 		break;
 	case XGMAC_INT_DMA_CH_SR_RI:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 0);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
 		break;
 	case XGMAC_INT_DMA_CH_SR_RBU:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 0);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
 		break;
 	case XGMAC_INT_DMA_CH_SR_RPS:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 0);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0);
+		break;
+	case XGMAC_INT_DMA_CH_SR_TI_RI:
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
 		break;
 	case XGMAC_INT_DMA_CH_SR_FBE:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 0);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0);
 		break;
 	case XGMAC_INT_DMA_ALL:
-		xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_SAVE);
-
-		dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+		channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK;
 		dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK;
-		XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
 		break;
 	default:
 		return -1;
 	}

+	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+
 	return 0;
 }

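The xgbe_enable_int()/xgbe_disable_int() rework above replaces a series of per-bit XGMAC_DMA_IOWRITE_BITS accesses (each a full MMIO read-modify-write) with one read of DMA_CH_IER, bit updates in a local variable, and a single write back. A minimal, self-contained sketch of that pattern (plain variables and helpers stand in for the MMIO accessors and bit layout, which are hypothetical here):

#include <stdint.h>
#include <stdio.h>

#define IER_TIE (1u << 0)	/* illustrative bit positions only */
#define IER_RIE (1u << 6)

static uint32_t fake_ier;	/* stands in for the DMA_CH_IER register */

static uint32_t reg_read(void)          { return fake_ier; }
static void     reg_write(uint32_t val) { fake_ier = val;  }

/* One read, all bit updates on a local copy, one write back --
 * instead of a separate read-modify-write per interrupt bit. */
static void enable_tx_rx_ints(void)
{
	uint32_t ier = reg_read();

	ier |= IER_TIE;
	ier |= IER_RIE;

	reg_write(ier);
}

int main(void)
{
	enable_tx_rx_ints();
	printf("IER = 0x%x\n", fake_ier);
	return 0;
}
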
@@ -1453,6 +1447,7 @@ static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)

 	/* Set the System Bus mode */
 	XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
+	XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_256, 1);
 }

 static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
@@ -1460,23 +1455,23 @@ static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
 	unsigned int arcache, awcache;

 	arcache = 0;
-	XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, XGBE_DMA_ARCACHE);
-	XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, XGBE_DMA_ARDOMAIN);
-	XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, XGBE_DMA_ARCACHE);
-	XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, XGBE_DMA_ARDOMAIN);
-	XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, XGBE_DMA_ARCACHE);
-	XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, XGBE_DMA_ARDOMAIN);
+	XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, pdata->arcache);
+	XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, pdata->axdomain);
+	XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, pdata->arcache);
+	XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, pdata->axdomain);
+	XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, pdata->arcache);
+	XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, pdata->axdomain);
 	XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);

 	awcache = 0;
-	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, XGBE_DMA_AWCACHE);
-	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, XGBE_DMA_AWDOMAIN);
-	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, XGBE_DMA_AWCACHE);
-	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, XGBE_DMA_AWDOMAIN);
-	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, XGBE_DMA_AWCACHE);
-	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, XGBE_DMA_AWDOMAIN);
-	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, XGBE_DMA_AWCACHE);
-	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, XGBE_DMA_AWDOMAIN);
+	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, pdata->awcache);
+	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, pdata->axdomain);
+	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, pdata->awcache);
+	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, pdata->axdomain);
+	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, pdata->awcache);
+	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, pdata->axdomain);
+	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, pdata->awcache);
+	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, pdata->axdomain);
 	XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
 }

@@ -156,16 +156,21 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct xgbe_channel *channel;
+	enum xgbe_int int_id;
 	unsigned int i;

 	channel = pdata->channel;
 	for (i = 0; i < pdata->channel_count; i++, channel++) {
-		if (channel->tx_ring)
-			hw_if->enable_int(channel,
-					  XGMAC_INT_DMA_CH_SR_TI);
-		if (channel->rx_ring)
-			hw_if->enable_int(channel,
-					  XGMAC_INT_DMA_CH_SR_RI);
+		if (channel->tx_ring && channel->rx_ring)
+			int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
+		else if (channel->tx_ring)
+			int_id = XGMAC_INT_DMA_CH_SR_TI;
+		else if (channel->rx_ring)
+			int_id = XGMAC_INT_DMA_CH_SR_RI;
+		else
+			continue;
+
+		hw_if->enable_int(channel, int_id);
 	}
 }

@@ -173,16 +178,21 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct xgbe_channel *channel;
+	enum xgbe_int int_id;
 	unsigned int i;

 	channel = pdata->channel;
 	for (i = 0; i < pdata->channel_count; i++, channel++) {
-		if (channel->tx_ring)
-			hw_if->disable_int(channel,
-					   XGMAC_INT_DMA_CH_SR_TI);
-		if (channel->rx_ring)
-			hw_if->disable_int(channel,
-					   XGMAC_INT_DMA_CH_SR_RI);
+		if (channel->tx_ring && channel->rx_ring)
+			int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
+		else if (channel->tx_ring)
+			int_id = XGMAC_INT_DMA_CH_SR_TI;
+		else if (channel->rx_ring)
+			int_id = XGMAC_INT_DMA_CH_SR_RI;
+		else
+			continue;
+
+		hw_if->disable_int(channel, int_id);
 	}
 }

@@ -412,9 +422,12 @@ static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
 	napi_enable(&pdata->napi);
 }

-static void xgbe_napi_disable(struct xgbe_prv_data *pdata)
+static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
 {
 	napi_disable(&pdata->napi);
+
+	if (del)
+		netif_napi_del(&pdata->napi);
 }

 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
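The new 'del' argument lets the ndo_stop path tear down the NAPI context that ndo_open registered, while the powerdown path only disables it. Schematically, the pairing follows the usual netif_napi_add()/netif_napi_del() lifetime (a sketch of the shape only, not the driver's actual open/stop code):

/* ndo_open path: register and enable NAPI */
netif_napi_add(netdev, &pdata->napi, xgbe_poll, NAPI_POLL_WEIGHT);
napi_enable(&pdata->napi);

/* ndo_stop path: disable and, unlike the powerdown path, also unregister */
napi_disable(&pdata->napi);
netif_napi_del(&pdata->napi);
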
@@ -518,7 +531,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
 	netif_device_detach(netdev);

 	netif_tx_stop_all_queues(netdev);
-	xgbe_napi_disable(pdata);
+	xgbe_napi_disable(pdata, 0);

 	/* Powerdown Tx/Rx */
 	hw_if->powerdown_tx(pdata);
@@ -607,7 +620,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
 	phy_stop(pdata->phydev);

 	netif_tx_stop_all_queues(netdev);
-	xgbe_napi_disable(pdata);
+	xgbe_napi_disable(pdata, 1);

 	xgbe_stop_tx_timers(pdata);

@@ -1111,6 +1124,22 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
 	return (struct net_device_ops *)&xgbe_netdev_ops;
 }

+static void xgbe_rx_refresh(struct xgbe_channel *channel)
+{
+	struct xgbe_prv_data *pdata = channel->pdata;
+	struct xgbe_desc_if *desc_if = &pdata->desc_if;
+	struct xgbe_ring *ring = channel->rx_ring;
+	struct xgbe_ring_data *rdata;
+
+	desc_if->realloc_skb(channel);
+
+	/* Update the Rx Tail Pointer Register with address of
+	 * the last cleaned entry */
+	rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
+	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
+			  lower_32_bits(rdata->rdesc_dma));
+}
+
 static int xgbe_tx_poll(struct xgbe_channel *channel)
 {
 	struct xgbe_prv_data *pdata = channel->pdata;
@@ -1168,7 +1197,6 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 {
 	struct xgbe_prv_data *pdata = channel->pdata;
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
-	struct xgbe_desc_if *desc_if = &pdata->desc_if;
 	struct xgbe_ring *ring = channel->rx_ring;
 	struct xgbe_ring_data *rdata;
 	struct xgbe_packet_data *packet;
@@ -1195,6 +1223,9 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 	cur_len = 0;

 read_again:
+	if (ring->dirty > (XGBE_RX_DESC_CNT >> 3))
+		xgbe_rx_refresh(channel);
+
 	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);

 	if (hw_if->dev_read(channel))
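With this change the Rx ring is replenished from inside the descriptor read loop once more than one eighth of the ring is dirty, instead of only once after the whole poll pass (the block removed in the next hunk). For example, with a 512-entry Rx ring, 512 >> 3 = 64, so xgbe_rx_refresh() re-arms descriptors and bumps the tail pointer as soon as more than 64 entries are waiting to be refreshed.
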
@@ -1282,16 +1313,6 @@ read_again:
 		napi_gro_receive(&pdata->napi, skb);
 	}

-	if (received) {
-		desc_if->realloc_skb(channel);
-
-		/* Update the Rx Tail Pointer Register with address of
-		 * the last cleaned entry */
-		rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
-		XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
-				  lower_32_bits(rdata->rdesc_dma));
-	}
-
 	DBGPR("<--xgbe_rx_poll: received = %d\n", received);

 	return received;
@@ -1302,21 +1323,28 @@ static int xgbe_poll(struct napi_struct *napi, int budget)
 	struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
 						   napi);
 	struct xgbe_channel *channel;
-	int processed;
+	int ring_budget;
+	int processed, last_processed;
 	unsigned int i;

 	DBGPR("-->xgbe_poll: budget=%d\n", budget);

-	/* Cleanup Tx ring first */
-	channel = pdata->channel;
-	for (i = 0; i < pdata->channel_count; i++, channel++)
-		xgbe_tx_poll(channel);
-
-	/* Process Rx ring next */
 	processed = 0;
-	channel = pdata->channel;
-	for (i = 0; i < pdata->channel_count; i++, channel++)
-		processed += xgbe_rx_poll(channel, budget - processed);
+	ring_budget = budget / pdata->rx_ring_count;
+	do {
+		last_processed = processed;
+
+		channel = pdata->channel;
+		for (i = 0; i < pdata->channel_count; i++, channel++) {
+			/* Cleanup Tx ring first */
+			xgbe_tx_poll(channel);
+
+			/* Process Rx ring next */
+			if (ring_budget > (budget - processed))
+				ring_budget = budget - processed;
+			processed += xgbe_rx_poll(channel, ring_budget);
+		}
+	} while ((processed < budget) && (processed != last_processed));

 	/* If we processed everything, we are done */
 	if (processed < budget) {
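The reworked xgbe_poll() splits the NAPI budget evenly across the Rx rings and keeps looping until either the budget is exhausted or a full pass makes no further progress. A small standalone model of the budget arithmetic (the helper is illustrative, not driver code):

#include <stdio.h>

/* Model of the per-ring budget split in the new xgbe_poll(): each ring
 * gets budget / ring_count, clamped so the running total never exceeds
 * the overall NAPI budget. */
static int ring_share(int budget, int processed, int ring_count)
{
	int ring_budget = budget / ring_count;

	if (ring_budget > (budget - processed))
		ring_budget = budget - processed;

	return ring_budget;
}

int main(void)
{
	/* e.g. a NAPI budget of 64 across 4 Rx rings: 16 packets each */
	printf("first ring gets %d\n", ring_share(64, 0, 4));

	/* if earlier rings already consumed 60, the last ring gets 4 */
	printf("last ring gets %d\n", ring_share(64, 60, 4));
	return 0;
}
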
@@ -297,6 +297,16 @@ static int xgbe_probe(struct platform_device *pdev)
 	*(dev->dma_mask) = DMA_BIT_MASK(40);
 	dev->coherent_dma_mask = DMA_BIT_MASK(40);

+	if (of_property_read_bool(dev->of_node, "dma-coherent")) {
+		pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
+		pdata->arcache = XGBE_DMA_OS_ARCACHE;
+		pdata->awcache = XGBE_DMA_OS_AWCACHE;
+	} else {
+		pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
+		pdata->arcache = XGBE_DMA_SYS_ARCACHE;
+		pdata->awcache = XGBE_DMA_SYS_AWCACHE;
+	}
+
 	ret = platform_get_irq(pdev, 0);
 	if (ret < 0) {
 		dev_err(dev, "platform_get_irq failed\n");
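The probe change above picks the AXI domain/cache values from whether the device tree node carries the standard "dma-coherent" property: outer-shareable, cacheable settings when the device is coherent, system-domain/no-cache settings otherwise. A compact standalone model of that selection, using the values from the xgbe.h hunk below (the helper and struct are illustrative, not driver code):

#include <stdbool.h>
#include <stdio.h>

/* Values taken from the XGBE_DMA_OS_* / XGBE_DMA_SYS_* defines below */
struct axi_settings {
	unsigned int axdomain;
	unsigned int arcache;
	unsigned int awcache;
};

static struct axi_settings pick_axi_settings(bool dma_coherent)
{
	if (dma_coherent)	/* "dma-coherent" present in the DT node */
		return (struct axi_settings){ 0x2, 0xb, 0xf };

	return (struct axi_settings){ 0x3, 0x0, 0x0 };	/* no caches used */
}

int main(void)
{
	struct axi_settings s = pick_axi_settings(true);

	printf("axdomain=0x%x arcache=0x%x awcache=0x%x\n",
	       s.axdomain, s.arcache, s.awcache);
	return 0;
}
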
@@ -143,10 +143,14 @@
 #define XGBE_MAX_DMA_CHANNELS	16

 /* DMA cache settings - Outer sharable, write-back, write-allocate */
-#define XGBE_DMA_ARDOMAIN	0x2
-#define XGBE_DMA_ARCACHE	0xb
-#define XGBE_DMA_AWDOMAIN	0x2
-#define XGBE_DMA_AWCACHE	0x7
+#define XGBE_DMA_OS_AXDOMAIN	0x2
+#define XGBE_DMA_OS_ARCACHE	0xb
+#define XGBE_DMA_OS_AWCACHE	0xf
+
+/* DMA cache settings - System, no caches used */
+#define XGBE_DMA_SYS_AXDOMAIN	0x3
+#define XGBE_DMA_SYS_ARCACHE	0x0
+#define XGBE_DMA_SYS_AWCACHE	0x0

 #define XGBE_DMA_INTERRUPT_MASK	0x31c7

@@ -181,12 +185,12 @@


 /* Default coalescing parameters */
-#define XGMAC_INIT_DMA_TX_USECS		100
-#define XGMAC_INIT_DMA_TX_FRAMES	16
+#define XGMAC_INIT_DMA_TX_USECS		50
+#define XGMAC_INIT_DMA_TX_FRAMES	25

 #define XGMAC_MAX_DMA_RIWT		0xff
-#define XGMAC_INIT_DMA_RX_USECS		100
-#define XGMAC_INIT_DMA_RX_FRAMES	16
+#define XGMAC_INIT_DMA_RX_USECS		30
+#define XGMAC_INIT_DMA_RX_FRAMES	25

 /* Flow control queue count */
 #define XGMAC_MAX_FLOW_CONTROL_QUEUES	8

@@ -307,13 +311,13 @@ struct xgbe_channel {
 } ____cacheline_aligned;

 enum xgbe_int {
-	XGMAC_INT_DMA_ISR_DC0IS,
 	XGMAC_INT_DMA_CH_SR_TI,
 	XGMAC_INT_DMA_CH_SR_TPS,
 	XGMAC_INT_DMA_CH_SR_TBU,
 	XGMAC_INT_DMA_CH_SR_RI,
 	XGMAC_INT_DMA_CH_SR_RBU,
 	XGMAC_INT_DMA_CH_SR_RPS,
+	XGMAC_INT_DMA_CH_SR_TI_RI,
 	XGMAC_INT_DMA_CH_SR_FBE,
 	XGMAC_INT_DMA_ALL,
 };

@@ -536,6 +540,11 @@ struct xgbe_prv_data {
 	struct xgbe_hw_if hw_if;
 	struct xgbe_desc_if desc_if;

+	/* AXI DMA settings */
+	unsigned int axdomain;
+	unsigned int arcache;
+	unsigned int awcache;
+
 	/* Rings for Tx/Rx on a DMA channel */
 	struct xgbe_channel *channel;
 	unsigned int channel_count;