Merge branch 'axienet-Update-error-handling-and-add-64-bit-DMA-support'
Andre Przywara says:
====================
net: axienet: Update error handling and add 64-bit DMA support
a minor update, fixing the 32-bit build breakage, and brightening up
Dave's Christmas tree. Rebased against the latest net-next/master.
This series is based on net-next as of today (9970de8b01), which
includes Russell's fixes [1], solving the SGMII issues I have had.
[1] https://lore.kernel.org/netdev/E1j6trA-0003GY-N1@rmk-PC.armlinux.org.uk/
Changelog v2 .. v3:
- Use two "left-shifts by 16" to fix builds with 32-bit phys_addr_t (see the sketch below this changelog)
- reorder variable declarations
Changelog v1 .. v2:
- Add Reviewed-by: tags from Radhey
- Extend kernel-doc documentation
- Convert DMA error handler tasklet to work queue
- log DMA mapping errors
- mark DMA mapping error checks as unlikely (in "hot" paths)
- return NETDEV_TX_OK on TX DMA mapping error (increasing TX drop counter)
- Request eth IRQ as an optional IRQ
- Remove no longer needed MDIO IRQ register names
- Drop DT property check for address width, assume full 64 bits
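As a reference for that v3 change: a single shift by 32 would be undefined
(and triggers a compiler warning) when dma_addr_t/phys_addr_t is only 32 bits
wide, so the read-side helper splits the shift in two. Simplified from the
patches below:

    static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
                                         struct axidma_bd *desc)
    {
        dma_addr_t ret = desc->phys;

        /* two shifts by 16 instead of one by 32: safe on 32-bit dma_addr_t */
        if (lp->features & XAE_FEATURE_DMA_64BIT)
            ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;

        return ret;
    }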
This series updates the Xilinx Axienet driver to work on our board
here. One big issue was broken SGMII support, which Russell fixed already
(in net-next).
While debugging and understanding the driver, I found several problems
in the error handling and cleanup paths, which patches 2-7 address.
Patch 8 removes an annoying error message; patch 9 paves the way for newer
revisions of the IP. The next patch adds mii-tool support, just for good
measure.
The next four patches add support for 64-bit DMA. This is an integration
option on newer IP revisions (>= v7.1), which expects the MSB bits of DMA
addresses in formerly reserved registers. Without writing to those MSB
registers, the state machine won't trigger, so it's mandatory to access
them, even if they are zero. Patches 11 and 12 prepare the code by adding
accessors to wrap this properly and keep it working on older IP revisions.
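To illustrate, the write-side register accessor added there looks roughly
like this (simplified from the patch below):

    static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
                                     dma_addr_t addr)
    {
        axienet_dma_out32(lp, reg, lower_32_bits(addr));
        /* only touch the (formerly reserved) MSB half on 64-bit capable IP */
        if (lp->features & XAE_FEATURE_DMA_64BIT)
            axienet_dma_out32(lp, reg + 4, upper_32_bits(addr));
    }

On older IP revisions the feature flag is never set, so the extra write is
skipped and behaviour is unchanged.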
Patch 13 detects support for the MSB registers by writing a non-zero value
to one of them and checking whether that sticks. Older IP revisions always
read those registers as zero.
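Condensed from the probe path in the diff below, the detection amounts to:

    /* MSB half of the TX current descriptor register; RES0 on 32-bit-only IP */
    void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;

    iowrite32(0x0, desc);
    if (ioread32(desc) == 0) {      /* sanity check */
        iowrite32(0xffffffff, desc);
        if (ioread32(desc) > 0) {
            lp->features |= XAE_FEATURE_DMA_64BIT;
            addr_width = 64;
        }
        iowrite32(0x0, desc);
    }

This is only attempted on IP revisions new enough to have the MSB registers
at all.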
Patch 14 then adjusts the DMA mask, based on the autodetected MSB
feature. It uses the full 64 bits in this case; the rest of the system
(the actual physical addresses in use) should provide a natural limit if
the chip has fewer address lines connected. If not, the parent DT node can
use a dma-ranges property.
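In probe, this boils down to (simplified from the probe changes below):

    ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
    if (ret) {
        dev_err(&pdev->dev, "No suitable DMA available\n");
        goto free_netdev;
    }

where addr_width is 64 when the MSB registers were detected and 32 otherwise.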
The Xilinx PG138 and PG021 documents (version 7.1 in both cases) were
used for this series.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 148aa2a86c
@@ -32,7 +32,6 @@ config XILINX_AXI_EMAC
config XILINX_LL_TEMAC
tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
depends on PPC || MICROBLAZE || X86 || COMPILE_TEST
select PHYLIB
---help---
This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
@@ -161,17 +161,11 @@
#define XAE_FCC_OFFSET 0x0000040C /* Flow Control Configuration */
#define XAE_EMMC_OFFSET 0x00000410 /* EMAC mode configuration */
#define XAE_PHYC_OFFSET 0x00000414 /* RGMII/SGMII configuration */
#define XAE_ID_OFFSET 0x000004F8 /* Identification register */
#define XAE_MDIO_MC_OFFSET 0x00000500 /* MII Management Config */
#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MII Management Control */
#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MII Management Write Data */
#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MII Management Read Data */
#define XAE_MDIO_MIS_OFFSET 0x00000600 /* MII Management Interrupt Status */
/* MII Mgmt Interrupt Pending register offset */
#define XAE_MDIO_MIP_OFFSET 0x00000620
/* MII Management Interrupt Enable register offset */
#define XAE_MDIO_MIE_OFFSET 0x00000640
/* MII Management Interrupt Clear register offset. */
#define XAE_MDIO_MIC_OFFSET 0x00000660
#define XAE_UAW0_OFFSET 0x00000700 /* Unicast address word 0 */
#define XAE_UAW1_OFFSET 0x00000704 /* Unicast address word 1 */
#define XAE_FMI_OFFSET 0x00000708 /* Filter Mask Index */
@@ -335,6 +329,7 @@
#define XAE_FEATURE_PARTIAL_TX_CSUM (1 << 1)
#define XAE_FEATURE_FULL_RX_CSUM (1 << 2)
#define XAE_FEATURE_FULL_TX_CSUM (1 << 3)
#define XAE_FEATURE_DMA_64BIT (1 << 4)

#define XAE_NO_CSUM_OFFLOAD 0
@@ -347,9 +342,9 @@
/**
* struct axidma_bd - Axi Dma buffer descriptor layout
* @next: MM2S/S2MM Next Descriptor Pointer
* @reserved1: Reserved and not used
* @next_msb: MM2S/S2MM Next Descriptor Pointer (high 32 bits)
* @phys: MM2S/S2MM Buffer Address
* @reserved2: Reserved and not used
* @phys_msb: MM2S/S2MM Buffer Address (high 32 bits)
* @reserved3: Reserved and not used
* @reserved4: Reserved and not used
* @cntrl: MM2S/S2MM Control value
@@ -362,9 +357,9 @@
*/
struct axidma_bd {
u32 next; /* Physical address of next buffer descriptor */
u32 reserved1;
u32 next_msb; /* high 32 bits for IP >= v7.1, reserved on older IP */
u32 phys;
u32 reserved2;
u32 phys_msb; /* for IP >= v7.1, reserved for older IP */
u32 reserved3;
u32 reserved4;
u32 cntrl;
@@ -435,7 +430,7 @@ struct axienet_local {
void __iomem *regs;
void __iomem *dma_regs;

struct tasklet_struct dma_err_tasklet;
struct work_struct dma_err_task;

int tx_irq;
int rx_irq;
@@ -147,6 +147,34 @@ static inline void axienet_dma_out32(struct axienet_local *lp,
iowrite32(value, lp->dma_regs + reg);
}

static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
dma_addr_t addr)
{
axienet_dma_out32(lp, reg, lower_32_bits(addr));

if (lp->features & XAE_FEATURE_DMA_64BIT)
axienet_dma_out32(lp, reg + 4, upper_32_bits(addr));
}

static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
struct axidma_bd *desc)
{
desc->phys = lower_32_bits(addr);
if (lp->features & XAE_FEATURE_DMA_64BIT)
desc->phys_msb = upper_32_bits(addr);
}

static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
struct axidma_bd *desc)
{
dma_addr_t ret = desc->phys;

if (lp->features & XAE_FEATURE_DMA_64BIT)
ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;

return ret;
}

/**
* axienet_dma_bd_release - Release buffer descriptor rings
* @ndev: Pointer to the net_device structure
@@ -160,24 +188,41 @@ static void axienet_dma_bd_release(struct net_device *ndev)
int i;
struct axienet_local *lp = netdev_priv(ndev);

/* If we end up here, tx_bd_v must have been DMA allocated. */
dma_free_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
lp->tx_bd_v,
lp->tx_bd_p);

if (!lp->rx_bd_v)
return;

for (i = 0; i < lp->rx_bd_num; i++) {
dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
lp->max_frm_size, DMA_FROM_DEVICE);
dma_addr_t phys;

/* A NULL skb means this descriptor has not been initialised
* at all.
*/
if (!lp->rx_bd_v[i].skb)
break;

dev_kfree_skb(lp->rx_bd_v[i].skb);

/* For each descriptor, we programmed cntrl with the (non-zero)
* descriptor size, after it had been successfully allocated.
* So a non-zero value in there means we need to unmap it.
*/
if (lp->rx_bd_v[i].cntrl) {
phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
dma_unmap_single(ndev->dev.parent, phys,
lp->max_frm_size, DMA_FROM_DEVICE);
}
}

if (lp->rx_bd_v) {
dma_free_coherent(ndev->dev.parent,
sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
lp->rx_bd_v,
lp->rx_bd_p);
}
if (lp->tx_bd_v) {
dma_free_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
lp->tx_bd_v,
lp->tx_bd_p);
}
dma_free_coherent(ndev->dev.parent,
sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
lp->rx_bd_v,
lp->rx_bd_p);
}

/**
@@ -207,7 +252,7 @@ static int axienet_dma_bd_init(struct net_device *ndev)
sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
&lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
goto out;
return -ENOMEM;

lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
@@ -216,25 +261,37 @@ static int axienet_dma_bd_init(struct net_device *ndev)
goto out;

for (i = 0; i < lp->tx_bd_num; i++) {
lp->tx_bd_v[i].next = lp->tx_bd_p +
sizeof(*lp->tx_bd_v) *
((i + 1) % lp->tx_bd_num);
dma_addr_t addr = lp->tx_bd_p +
sizeof(*lp->tx_bd_v) *
((i + 1) % lp->tx_bd_num);

lp->tx_bd_v[i].next = lower_32_bits(addr);
if (lp->features & XAE_FEATURE_DMA_64BIT)
lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
}

for (i = 0; i < lp->rx_bd_num; i++) {
lp->rx_bd_v[i].next = lp->rx_bd_p +
sizeof(*lp->rx_bd_v) *
((i + 1) % lp->rx_bd_num);
dma_addr_t addr;

addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
((i + 1) % lp->rx_bd_num);
lp->rx_bd_v[i].next = lower_32_bits(addr);
if (lp->features & XAE_FEATURE_DMA_64BIT)
lp->rx_bd_v[i].next_msb = upper_32_bits(addr);

skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
if (!skb)
goto out;

lp->rx_bd_v[i].skb = skb;
lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
skb->data,
lp->max_frm_size,
DMA_FROM_DEVICE);
addr = dma_map_single(ndev->dev.parent, skb->data,
lp->max_frm_size, DMA_FROM_DEVICE);
if (dma_mapping_error(ndev->dev.parent, addr)) {
netdev_err(ndev, "DMA mapping error\n");
goto out;
}
desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);

lp->rx_bd_v[i].cntrl = lp->max_frm_size;
}
@@ -267,18 +324,18 @@ static int axienet_dma_bd_init(struct net_device *ndev)
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
* halted state. This will make the Rx side ready for reception.
*/
axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
(sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
(sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));

/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
* tail pointer register that the Tx channel will start transmitting.
*/
axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
@@ -437,9 +494,10 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
lp->options |= options;
}

static void __axienet_device_reset(struct axienet_local *lp)
static int __axienet_device_reset(struct axienet_local *lp)
{
u32 timeout;

/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
* process of Axi DMA takes a while to complete as all pending
* commands/transfers will be flushed or completed during this
@@ -455,9 +513,11 @@ static void __axienet_device_reset(struct axienet_local *lp)
if (--timeout == 0) {
netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
__func__);
break;
return -ETIMEDOUT;
}
}

return 0;
}

/**
@@ -470,13 +530,17 @@ static void __axienet_device_reset(struct axienet_local *lp)
* areconnected to Axi Ethernet reset lines, this in turn resets the Axi
* Ethernet core. No separate hardware reset is done for the Axi Ethernet
* core.
* Returns 0 on success or a negative error number otherwise.
*/
static void axienet_device_reset(struct net_device *ndev)
static int axienet_device_reset(struct net_device *ndev)
{
u32 axienet_status;
struct axienet_local *lp = netdev_priv(ndev);
int ret;

__axienet_device_reset(lp);
ret = __axienet_device_reset(lp);
if (ret)
return ret;

lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
lp->options |= XAE_OPTION_VLAN;
@@ -491,9 +555,11 @@ static void axienet_device_reset(struct net_device *ndev)
lp->options |= XAE_OPTION_JUMBO;
}

if (axienet_dma_bd_init(ndev)) {
ret = axienet_dma_bd_init(ndev);
if (ret) {
netdev_err(ndev, "%s: descriptor allocation failed\n",
__func__);
return ret;
}

axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
@@ -518,6 +584,66 @@ static void axienet_device_reset(struct net_device *ndev)
axienet_setoptions(ndev, lp->options);

netif_trans_update(ndev);

return 0;
}

/**
* axienet_free_tx_chain - Clean up a series of linked TX descriptors.
* @ndev: Pointer to the net_device structure
* @first_bd: Index of first descriptor to clean up
* @nr_bds: Number of descriptors to clean up, can be -1 if unknown.
* @sizep: Pointer to a u32 filled with the total sum of all bytes
* in all cleaned-up descriptors. Ignored if NULL.
*
* Would either be called after a successful transmit operation, or after
* there was an error when setting up the chain.
* Returns the number of descriptors handled.
*/
static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
int nr_bds, u32 *sizep)
{
struct axienet_local *lp = netdev_priv(ndev);
struct axidma_bd *cur_p;
int max_bds = nr_bds;
unsigned int status;
dma_addr_t phys;
int i;

if (max_bds == -1)
max_bds = lp->tx_bd_num;

for (i = 0; i < max_bds; i++) {
cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
status = cur_p->status;

/* If no number is given, clean up *all* descriptors that have
* been completed by the MAC.
*/
if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
break;

phys = desc_get_phys_addr(lp, cur_p);
dma_unmap_single(ndev->dev.parent, phys,
(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);

if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
dev_consume_skb_irq(cur_p->skb);

cur_p->cntrl = 0;
cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
cur_p->app4 = 0;
cur_p->status = 0;
cur_p->skb = NULL;

if (sizep)
*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
}

return i;
}

/**
@@ -533,36 +659,15 @@ static void axienet_device_reset(struct net_device *ndev)
*/
static void axienet_start_xmit_done(struct net_device *ndev)
{
u32 size = 0;
u32 packets = 0;
struct axienet_local *lp = netdev_priv(ndev);
struct axidma_bd *cur_p;
unsigned int status = 0;
u32 packets = 0;
u32 size = 0;

cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
status = cur_p->status;
while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
dma_unmap_single(ndev->dev.parent, cur_p->phys,
(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
if (cur_p->skb)
dev_consume_skb_irq(cur_p->skb);
/*cur_p->phys = 0;*/
cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
cur_p->app4 = 0;
cur_p->status = 0;
cur_p->skb = NULL;
packets = axienet_free_tx_chain(ndev, lp->tx_bd_ci, -1, &size);

size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
packets++;

if (++lp->tx_bd_ci >= lp->tx_bd_num)
lp->tx_bd_ci = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
status = cur_p->status;
}
lp->tx_bd_ci += packets;
if (lp->tx_bd_ci >= lp->tx_bd_num)
lp->tx_bd_ci -= lp->tx_bd_num;

ndev->stats.tx_packets += packets;
ndev->stats.tx_bytes += size;
@@ -617,9 +722,10 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 csum_start_off;
u32 csum_index_off;
skb_frag_t *frag;
dma_addr_t tail_p;
dma_addr_t tail_p, phys;
struct axienet_local *lp = netdev_priv(ndev);
struct axidma_bd *cur_p;
u32 orig_tail_ptr = lp->tx_bd_tail;

num_frag = skb_shinfo(skb)->nr_frags;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
@@ -655,19 +761,37 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
}

phys = dma_map_single(ndev->dev.parent, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
if (net_ratelimit())
netdev_err(ndev, "TX DMA mapping error\n");
ndev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
desc_set_phys_addr(lp, phys, cur_p);
cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);

for (ii = 0; ii < num_frag; ii++) {
if (++lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
frag = &skb_shinfo(skb)->frags[ii];
cur_p->phys = dma_map_single(ndev->dev.parent,
skb_frag_address(frag),
skb_frag_size(frag),
DMA_TO_DEVICE);
phys = dma_map_single(ndev->dev.parent,
skb_frag_address(frag),
skb_frag_size(frag),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
if (net_ratelimit())
netdev_err(ndev, "TX DMA mapping error\n");
ndev->stats.tx_dropped++;
axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
NULL);
lp->tx_bd_tail = orig_tail_ptr;

return NETDEV_TX_OK;
}
desc_set_phys_addr(lp, phys, cur_p);
cur_p->cntrl = skb_frag_size(frag);
}
@@ -676,7 +800,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
/* Start the transfer */
axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
if (++lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0;
@@ -706,10 +830,12 @@ static void axienet_recv(struct net_device *ndev)
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
dma_addr_t phys;

tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;

dma_unmap_single(ndev->dev.parent, cur_p->phys,
lp->max_frm_size,
phys = desc_get_phys_addr(lp, cur_p);
dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
DMA_FROM_DEVICE);

skb = cur_p->skb;
@@ -745,9 +871,17 @@ static void axienet_recv(struct net_device *ndev)
if (!new_skb)
return;

cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
lp->max_frm_size,
DMA_FROM_DEVICE);
phys = dma_map_single(ndev->dev.parent, new_skb->data,
lp->max_frm_size,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
if (net_ratelimit())
netdev_err(ndev, "RX DMA mapping error\n");
dev_kfree_skb(new_skb);
return;
}
desc_set_phys_addr(lp, phys, cur_p);

cur_p->cntrl = lp->max_frm_size;
cur_p->status = 0;
cur_p->skb = new_skb;
@@ -761,7 +895,7 @@ static void axienet_recv(struct net_device *ndev)
ndev->stats.rx_bytes += size;

if (tail_p)
axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}

/**
@@ -791,7 +925,8 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
return IRQ_NONE;
if (status & XAXIDMA_IRQ_ERROR_MASK) {
dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
(lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
(lp->tx_bd_v[lp->tx_bd_ci]).phys);

cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
@@ -806,7 +941,7 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
/* Write to the Rx channel control register */
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

tasklet_schedule(&lp->dma_err_tasklet);
schedule_work(&lp->dma_err_task);
axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
}
out:
@@ -840,7 +975,8 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
return IRQ_NONE;
if (status & XAXIDMA_IRQ_ERROR_MASK) {
dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
(lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
(lp->rx_bd_v[lp->rx_bd_ci]).phys);

cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
@@ -855,7 +991,7 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
/* write to the Rx channel control register */
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

tasklet_schedule(&lp->dma_err_tasklet);
schedule_work(&lp->dma_err_task);
axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
}
out:
@@ -891,7 +1027,7 @@ static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
return IRQ_HANDLED;
}

static void axienet_dma_err_handler(unsigned long data);
static void axienet_dma_err_handler(struct work_struct *work);

/**
* axienet_open - Driver open routine.
@@ -921,8 +1057,9 @@ static int axienet_open(struct net_device *ndev)
*/
mutex_lock(&lp->mii_bus->mdio_lock);
axienet_mdio_disable(lp);
axienet_device_reset(ndev);
ret = axienet_mdio_enable(lp);
ret = axienet_device_reset(ndev);
if (ret == 0)
ret = axienet_mdio_enable(lp);
mutex_unlock(&lp->mii_bus->mdio_lock);
if (ret < 0)
return ret;
@@ -935,9 +1072,8 @@ static int axienet_open(struct net_device *ndev)
phylink_start(lp->phylink);

/* Enable tasklets for Axi DMA error handling */
tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
(unsigned long) lp);
/* Enable worker thread for Axi DMA error handling */
INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);

/* Enable interrupts for Axi DMA Tx */
ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
@@ -966,7 +1102,7 @@ err_rx_irq:
err_tx_irq:
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
tasklet_kill(&lp->dma_err_tasklet);
cancel_work_sync(&lp->dma_err_task);
dev_err(lp->dev, "request_irq() failed\n");
return ret;
}
@@ -1025,7 +1161,7 @@ static int axienet_stop(struct net_device *ndev)
axienet_mdio_enable(lp);
mutex_unlock(&lp->mii_bus->mdio_lock);

tasklet_kill(&lp->dma_err_tasklet);
cancel_work_sync(&lp->dma_err_task);

if (lp->eth_irq > 0)
free_irq(lp->eth_irq, ndev);
@@ -1083,6 +1219,16 @@ static void axienet_poll_controller(struct net_device *ndev)
}
#endif

static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct axienet_local *lp = netdev_priv(dev);

if (!netif_running(dev))
return -EINVAL;

return phylink_mii_ioctl(lp->phylink, rq, cmd);
}

static const struct net_device_ops axienet_netdev_ops = {
.ndo_open = axienet_open,
.ndo_stop = axienet_stop,
@@ -1090,6 +1236,7 @@ static const struct net_device_ops axienet_netdev_ops = {
.ndo_change_mtu = axienet_change_mtu,
.ndo_set_mac_address = netdev_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = axienet_ioctl,
.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = axienet_poll_controller,
@@ -1170,10 +1317,6 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
@@ -1484,17 +1627,18 @@ static const struct phylink_mac_ops axienet_phylink_ops = {
};

/**
* axienet_dma_err_handler - Tasklet handler for Axi DMA Error
* @data: Data passed
* axienet_dma_err_handler - Work queue task for Axi DMA Error
* @work: pointer to work_struct
*
* Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
* Tx/Rx BDs.
*/
static void axienet_dma_err_handler(unsigned long data)
static void axienet_dma_err_handler(struct work_struct *work)
{
u32 axienet_status;
u32 cr, i;
struct axienet_local *lp = (struct axienet_local *) data;
struct axienet_local *lp = container_of(work, struct axienet_local,
dma_err_task);
struct net_device *ndev = lp->ndev;
struct axidma_bd *cur_p;
@@ -1514,14 +1658,18 @@ static void axienet_dma_err_handler(unsigned long data)
for (i = 0; i < lp->tx_bd_num; i++) {
cur_p = &lp->tx_bd_v[i];
if (cur_p->phys)
dma_unmap_single(ndev->dev.parent, cur_p->phys,
if (cur_p->cntrl) {
dma_addr_t addr = desc_get_phys_addr(lp, cur_p);

dma_unmap_single(ndev->dev.parent, addr,
(cur_p->cntrl &
XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
}
if (cur_p->skb)
dev_kfree_skb_irq(cur_p->skb);
cur_p->phys = 0;
cur_p->phys_msb = 0;
cur_p->cntrl = 0;
cur_p->status = 0;
cur_p->app0 = 0;
@@ -1575,18 +1723,18 @@ static void axienet_dma_err_handler(unsigned long data)
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
* halted state. This will make the Rx side ready for reception.
*/
axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
(sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
(sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));

/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
* tail pointer register that the Tx channel will start transmitting
*/
axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
@@ -1632,6 +1780,7 @@ static int axienet_probe(struct platform_device *pdev)
struct net_device *ndev;
const void *mac_addr;
struct resource *ethres;
int addr_width = 32;
u32 value;

ndev = alloc_etherdev(sizeof(*lp));
@@ -1762,7 +1911,7 @@ static int axienet_probe(struct platform_device *pdev)
lp->rx_irq = irq_of_parse_and_map(np, 1);
lp->tx_irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
lp->eth_irq = platform_get_irq(pdev, 0);
lp->eth_irq = platform_get_irq_optional(pdev, 0);
} else {
/* Check for these resources directly on the Ethernet node. */
struct resource *res = platform_get_resource(pdev,
@@ -1770,7 +1919,7 @@ static int axienet_probe(struct platform_device *pdev)
lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
lp->rx_irq = platform_get_irq(pdev, 1);
lp->tx_irq = platform_get_irq(pdev, 0);
lp->eth_irq = platform_get_irq(pdev, 2);
lp->eth_irq = platform_get_irq_optional(pdev, 2);
}
if (IS_ERR(lp->dma_regs)) {
dev_err(&pdev->dev, "could not map DMA regs\n");
@@ -1783,6 +1932,36 @@ static int axienet_probe(struct platform_device *pdev)
goto free_netdev;
}

/* Autodetect the need for 64-bit DMA pointers.
* When the IP is configured for a bus width bigger than 32 bits,
* writing the MSB registers is mandatory, even if they are all 0.
* We can detect this case by writing all 1's to one such register
* and see if that sticks: when the IP is configured for 32 bits
* only, those registers are RES0.
* Those MSB registers were introduced in IP v7.1, which we check first.
*/
if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;

iowrite32(0x0, desc);
if (ioread32(desc) == 0) { /* sanity check */
iowrite32(0xffffffff, desc);
if (ioread32(desc) > 0) {
lp->features |= XAE_FEATURE_DMA_64BIT;
addr_width = 64;
dev_info(&pdev->dev,
"autodetected 64-bit DMA range\n");
}
iowrite32(0x0, desc);
}
}

ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
if (ret) {
dev_err(&pdev->dev, "No suitable DMA available\n");
goto free_netdev;
}

/* Check for Ethernet core IRQ (optional) */
if (lp->eth_irq <= 0)
dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");