2006-11-09 21:51:17 +08:00
|
|
|
/*
|
2011-11-08 18:12:32 +08:00
|
|
|
* Cadence MACB/GEM Ethernet Controller driver
|
2006-11-09 21:51:17 +08:00
|
|
|
*
|
|
|
|
* Copyright (C) 2004-2006 Atmel Corporation
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*/
|
|
|
|
|
2011-03-09 04:27:08 +08:00
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
2006-11-09 21:51:17 +08:00
|
|
|
#include <linux/clk.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/moduleparam.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/types.h>
|
2012-11-19 14:00:21 +08:00
|
|
|
#include <linux/circ_buf.h>
|
2006-11-09 21:51:17 +08:00
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/init.h>
|
2012-11-11 21:56:27 +08:00
|
|
|
#include <linux/gpio.h>
|
2011-06-06 18:43:46 +08:00
|
|
|
#include <linux/interrupt.h>
|
2006-11-09 21:51:17 +08:00
|
|
|
#include <linux/netdevice.h>
|
|
|
|
#include <linux/etherdevice.h>
|
|
|
|
#include <linux/dma-mapping.h>
|
2011-03-09 04:17:06 +08:00
|
|
|
#include <linux/platform_data/macb.h>
|
2006-11-09 21:51:17 +08:00
|
|
|
#include <linux/platform_device.h>
|
2007-07-13 01:07:24 +08:00
|
|
|
#include <linux/phy.h>
|
2011-12-21 05:13:07 +08:00
|
|
|
#include <linux/of.h>
|
2011-11-18 22:29:25 +08:00
|
|
|
#include <linux/of_device.h>
|
2013-08-22 23:57:28 +08:00
|
|
|
#include <linux/of_mdio.h>
|
2011-11-18 22:29:25 +08:00
|
|
|
#include <linux/of_net.h>
|
2012-10-31 14:04:59 +08:00
|
|
|
#include <linux/pinctrl/consumer.h>
|
2006-11-09 21:51:17 +08:00
|
|
|
|
|
|
|
#include "macb.h"
|
|
|
|
|
2013-06-05 05:57:11 +08:00
|
|
|
/* RX buffer size used by the old MACB (non-GEM) core; GEM buffer sizes are
 * computed at runtime but must be a multiple of RX_BUFFER_MULTIPLE. */
#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64	/* bytes */

/* Number of RX descriptors; power of 2 so ring indices can wrap by masking */
#define RX_RING_SIZE		512 /* must be power of 2 */
#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)

/* Number of TX descriptors; power of 2 so ring indices can wrap by masking */
#define TX_RING_SIZE		128 /* must be power of 2 */
#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)

/* Interrupt sources handled by the RX path (completion, ring/FIFO overruns) */
#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
				 | MACB_BIT(ISR_ROVR))
/* TX error conditions that trigger the TX error recovery task */
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
					| MACB_BIT(ISR_RLE)		\
					| MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))

/*
 * Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230
|
2006-11-09 21:51:17 +08:00
|
|
|
|
2012-10-31 14:04:55 +08:00
|
|
|
/* Ring buffer accessors */
|
|
|
|
static unsigned int macb_tx_ring_wrap(unsigned int index)
|
|
|
|
{
|
|
|
|
return index & (TX_RING_SIZE - 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index)
|
|
|
|
{
|
|
|
|
return &bp->tx_ring[macb_tx_ring_wrap(index)];
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index)
|
|
|
|
{
|
|
|
|
return &bp->tx_skb[macb_tx_ring_wrap(index)];
|
|
|
|
}
|
|
|
|
|
|
|
|
static dma_addr_t macb_tx_dma(struct macb *bp, unsigned int index)
|
|
|
|
{
|
|
|
|
dma_addr_t offset;
|
|
|
|
|
|
|
|
offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
|
|
|
|
|
|
|
|
return bp->tx_ring_dma + offset;
|
|
|
|
}
|
|
|
|
|
|
|
|
static unsigned int macb_rx_ring_wrap(unsigned int index)
|
|
|
|
{
|
|
|
|
return index & (RX_RING_SIZE - 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
|
|
|
|
{
|
|
|
|
return &bp->rx_ring[macb_rx_ring_wrap(index)];
|
|
|
|
}
|
|
|
|
|
|
|
|
static void *macb_rx_buffer(struct macb *bp, unsigned int index)
|
|
|
|
{
|
2013-06-05 05:57:11 +08:00
|
|
|
return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
|
2012-10-31 14:04:55 +08:00
|
|
|
}
|
|
|
|
|
2012-11-07 16:14:52 +08:00
|
|
|
/*
 * Program the net_device's MAC address into the hardware's first
 * specific-address register pair (SA1B/SA1T) and clear the three
 * unused pairs so stale filters cannot match.
 */
void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	/* Lower 4 bytes of the address go into SA1B */
	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	/* Upper 2 bytes go into SA1T; writing SA1T activates the filter */
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}
EXPORT_SYMBOL_GPL(macb_set_hwaddr);
|
2006-11-09 21:51:17 +08:00
|
|
|
|
2012-11-07 16:14:52 +08:00
|
|
|
/*
 * Recover the MAC address left in the hardware (typically by the
 * bootloader): scan all four specific-address register sets and adopt
 * the first valid one. Platform data may flag that the bootloader
 * stored the address byte-reversed (rev_eth_addr). If no valid address
 * is found, fall back to a random one.
 */
void macb_get_hwaddr(struct macb *bp)
{
	struct macb_platform_data *pdata;
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	pdata = dev_get_platdata(&bp->pdev->dev);

	/* Check all 4 address register sets for a valid address */
	for (i = 0; i < 4; i++) {
		/* SAxB/SAxT pairs are laid out 8 bytes apart */
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		if (pdata && pdata->rev_eth_addr) {
			/* Bootloader stored the address byte-reversed */
			addr[5] = bottom & 0xff;
			addr[4] = (bottom >> 8) & 0xff;
			addr[3] = (bottom >> 16) & 0xff;
			addr[2] = (bottom >> 24) & 0xff;
			addr[1] = top & 0xff;
			addr[0] = (top & 0xff00) >> 8;
		} else {
			addr[0] = bottom & 0xff;
			addr[1] = (bottom >> 8) & 0xff;
			addr[2] = (bottom >> 16) & 0xff;
			addr[3] = (bottom >> 24) & 0xff;
			addr[4] = top & 0xff;
			addr[5] = (top >> 8) & 0xff;
		}

		/* Use the first set that holds a valid unicast address */
		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	netdev_info(bp->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}
EXPORT_SYMBOL_GPL(macb_get_hwaddr);
|
2006-11-09 21:51:17 +08:00
|
|
|
|
2007-07-13 01:07:24 +08:00
|
|
|
static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
|
2006-11-09 21:51:17 +08:00
|
|
|
{
|
2007-07-13 01:07:24 +08:00
|
|
|
struct macb *bp = bus->priv;
|
2006-11-09 21:51:17 +08:00
|
|
|
int value;
|
|
|
|
|
|
|
|
macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
|
|
|
|
| MACB_BF(RW, MACB_MAN_READ)
|
2007-07-13 01:07:24 +08:00
|
|
|
| MACB_BF(PHYA, mii_id)
|
|
|
|
| MACB_BF(REGA, regnum)
|
2006-11-09 21:51:17 +08:00
|
|
|
| MACB_BF(CODE, MACB_MAN_CODE)));
|
|
|
|
|
2007-07-13 01:07:24 +08:00
|
|
|
/* wait for end of transfer */
|
|
|
|
while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
|
|
|
|
cpu_relax();
|
2006-11-09 21:51:17 +08:00
|
|
|
|
|
|
|
value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
|
|
|
|
|
|
|
|
return value;
|
|
|
|
}
|
|
|
|
|
2007-07-13 01:07:24 +08:00
|
|
|
static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
|
|
|
|
u16 value)
|
2006-11-09 21:51:17 +08:00
|
|
|
{
|
2007-07-13 01:07:24 +08:00
|
|
|
struct macb *bp = bus->priv;
|
2006-11-09 21:51:17 +08:00
|
|
|
|
|
|
|
macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
|
|
|
|
| MACB_BF(RW, MACB_MAN_WRITE)
|
2007-07-13 01:07:24 +08:00
|
|
|
| MACB_BF(PHYA, mii_id)
|
|
|
|
| MACB_BF(REGA, regnum)
|
2006-11-09 21:51:17 +08:00
|
|
|
| MACB_BF(CODE, MACB_MAN_CODE)
|
2007-07-13 01:07:24 +08:00
|
|
|
| MACB_BF(DATA, value)));
|
2006-11-09 21:51:17 +08:00
|
|
|
|
2007-07-13 01:07:24 +08:00
|
|
|
/* wait for end of transfer */
|
|
|
|
while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
|
|
|
|
cpu_relax();
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2006-11-09 21:51:17 +08:00
|
|
|
|
2007-07-13 01:07:24 +08:00
|
|
|
/* mii_bus reset callback: the MACB MDIO block needs no per-bus reset,
 * so simply report success. */
static int macb_mdio_reset(struct mii_bus *bus)
{
	return 0;
}
|
|
|
|
|
2007-07-13 01:07:24 +08:00
|
|
|
/*
 * phylib adjust_link callback. Called by the PHY state machine whenever
 * link, speed or duplex may have changed; reprograms NCFGR to match the
 * negotiated parameters and mirrors the new state into bp->speed,
 * bp->duplex and bp->link. Carrier changes are logged outside the lock.
 */
static void macb_handle_link_change(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;
	unsigned long flags;

	int status_change = 0;

	/* bp->lock serializes against the interrupt/TX paths that also
	 * touch NCFGR and the cached link state */
	spin_lock_irqsave(&bp->lock, flags);

	if (phydev->link) {
		/* Only rewrite NCFGR when speed or duplex actually changed */
		if ((bp->speed != phydev->speed) ||
		    (bp->duplex != phydev->duplex)) {
			u32 reg;

			reg = macb_readl(bp, NCFGR);
			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			if (macb_is_gem(bp))
				reg &= ~GEM_BIT(GBE);

			if (phydev->duplex)
				reg |= MACB_BIT(FD);
			if (phydev->speed == SPEED_100)
				reg |= MACB_BIT(SPD);
			/* Gigabit bit only exists on GEM variants */
			if (phydev->speed == SPEED_1000)
				reg |= GEM_BIT(GBE);

			macb_or_gem_writel(bp, NCFGR, reg);

			bp->speed = phydev->speed;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != bp->link) {
		if (!phydev->link) {
			/* Link lost: invalidate cached speed/duplex so the
			 * next link-up always reprograms NCFGR */
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	/* Report carrier changes to the stack and the log (sleepable
	 * context not required, but done outside the spinlock anyway) */
	if (status_change) {
		if (phydev->link) {
			netif_carrier_on(dev);
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    phydev->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
		} else {
			netif_carrier_off(dev);
			netdev_info(dev, "link down\n");
		}
	}
}
|
|
|
|
|
2007-07-13 01:07:24 +08:00
|
|
|
/* based on au1000_eth.c */
/*
 * Find the first PHY on our MDIO bus, optionally hook up a GPIO-based
 * PHY interrupt from platform data, attach it to the netdev via phylib
 * and restrict the advertised features to what the MAC supports
 * (gigabit only on GEM). Returns 0 or a negative errno.
 */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_platform_data *pdata;
	struct phy_device *phydev;
	int phy_irq;
	int ret;

	phydev = phy_find_first(bp->mii_bus);
	if (!phydev) {
		netdev_err(dev, "no PHY found\n");
		return -ENXIO;
	}

	/* If the board wired the PHY interrupt to a GPIO, use it;
	 * otherwise phylib falls back to polling */
	pdata = dev_get_platdata(&bp->pdev->dev);
	if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
		ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
		if (!ret) {
			phy_irq = gpio_to_irq(pdata->phy_irq_pin);
			phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
		}
	}

	/* attach the mac to the phy */
	ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
				 bp->phy_interface);
	if (ret) {
		netdev_err(dev, "Could not attach to PHY\n");
		return ret;
	}

	/* mask with MAC supported features */
	if (macb_is_gem(bp))
		phydev->supported &= PHY_GBIT_FEATURES;
	else
		phydev->supported &= PHY_BASIC_FEATURES;

	phydev->advertising = phydev->supported;

	/* Start with "unknown" link state; adjust_link fills these in */
	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;
	bp->phy_dev = phydev;

	return 0;
}
|
|
|
|
|
2012-10-18 19:01:12 +08:00
|
|
|
/*
 * Allocate and register the MDIO bus for this MAC, then probe for the
 * PHY. Registration goes through the device tree when an of_node is
 * present (with a fallback bus scan if DT listed no PHYs), otherwise
 * through plain mdiobus_register() with the platform-data phy_mask.
 * On failure, resources are unwound in reverse order via the goto
 * ladder. Returns 0 or a negative errno.
 */
int macb_mii_init(struct macb *bp)
{
	struct macb_platform_data *pdata;
	struct device_node *np;
	int err = -ENXIO, i;

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (bp->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	bp->mii_bus->reset = &macb_mdio_reset;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->dev->dev;
	pdata = dev_get_platdata(&bp->pdev->dev);

	/* Per-address IRQ table required by the mii_bus core */
	bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
	if (!bp->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out_free_mdiobus;
	}

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	np = bp->pdev->dev.of_node;
	if (np) {
		/* try dt phy registration */
		err = of_mdiobus_register(bp->mii_bus, np);

		/* fallback to standard phy registration if no phy were
		   found during dt phy registration */
		if (!err && !phy_find_first(bp->mii_bus)) {
			for (i = 0; i < PHY_MAX_ADDR; i++) {
				struct phy_device *phydev;

				phydev = mdiobus_scan(bp->mii_bus, i);
				if (IS_ERR(phydev)) {
					err = PTR_ERR(phydev);
					break;
				}
			}

			/* Bus is already registered at this point, so a
			 * scan failure must unregister it */
			if (err)
				goto err_out_unregister_bus;
		}
	} else {
		/* Non-DT path: poll every PHY address by default */
		for (i = 0; i < PHY_MAX_ADDR; i++)
			bp->mii_bus->irq[i] = PHY_POLL;

		if (pdata)
			bp->mii_bus->phy_mask = pdata->phy_mask;

		err = mdiobus_register(bp->mii_bus);
	}

	if (err)
		goto err_out_free_mdio_irq;

	err = macb_mii_probe(bp->dev);
	if (err)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdio_irq:
	kfree(bp->mii_bus->irq);
err_out_free_mdiobus:
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}
EXPORT_SYMBOL_GPL(macb_mii_init);
|
2006-11-09 21:51:17 +08:00
|
|
|
|
|
|
|
static void macb_update_stats(struct macb *bp)
|
|
|
|
{
|
|
|
|
u32 __iomem *reg = bp->regs + MACB_PFR;
|
2011-03-10 00:26:35 +08:00
|
|
|
u32 *p = &bp->hw_stats.macb.rx_pause_frames;
|
|
|
|
u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
|
2006-11-09 21:51:17 +08:00
|
|
|
|
|
|
|
WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
|
|
|
|
|
|
|
|
for(; p < end; p++, reg++)
|
2006-12-08 21:38:30 +08:00
|
|
|
*p += __raw_readl(reg);
|
2006-11-09 21:51:17 +08:00
|
|
|
}
|
|
|
|
|
2012-10-31 14:04:57 +08:00
|
|
|
static int macb_halt_tx(struct macb *bp)
|
2006-11-09 21:51:17 +08:00
|
|
|
{
|
2012-10-31 14:04:57 +08:00
|
|
|
unsigned long halt_time, timeout;
|
|
|
|
u32 status;
|
2006-11-09 21:51:17 +08:00
|
|
|
|
2012-10-31 14:04:57 +08:00
|
|
|
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
|
2006-11-09 21:51:17 +08:00
|
|
|
|
2012-10-31 14:04:57 +08:00
|
|
|
timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
|
|
|
|
do {
|
|
|
|
halt_time = jiffies;
|
|
|
|
status = macb_readl(bp, TSR);
|
|
|
|
if (!(status & MACB_BIT(TGO)))
|
|
|
|
return 0;
|
2006-11-09 21:51:17 +08:00
|
|
|
|
2012-10-31 14:04:57 +08:00
|
|
|
usleep_range(10, 250);
|
|
|
|
} while (time_before(halt_time, timeout));
|
2007-12-20 01:23:44 +08:00
|
|
|
|
2012-10-31 14:04:57 +08:00
|
|
|
return -ETIMEDOUT;
|
|
|
|
}
|
2009-01-19 13:57:35 +08:00
|
|
|
|
2012-10-31 14:04:57 +08:00
|
|
|
/*
 * Workqueue handler scheduled when the ISR sees a TX error. Halts the
 * transmitter, reclaims every queued skb (completed or not), resets the
 * TX ring to an empty state and restarts the queue with TX interrupts
 * re-enabled. Runs with the queue stopped, so no locking is needed for
 * the ring indices.
 */
static void macb_tx_error_task(struct work_struct *work)
{
	struct macb *bp = container_of(work, struct macb, tx_error_task);
	struct macb_tx_skb *tx_skb;
	struct sk_buff *skb;
	unsigned int tail;

	netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n",
		    bp->tx_tail, bp->tx_head);

	/* Make sure nobody is trying to queue up new packets */
	netif_stop_queue(bp->dev);

	/*
	 * Stop transmission now
	 * (in case we have just queued new packets)
	 */
	if (macb_halt_tx(bp))
		/* Just complain for now, reinitializing TX path can be good */
		netdev_err(bp->dev, "BUG: halt tx timed out\n");

	/* No need for the lock here as nobody will interrupt us anymore */

	/*
	 * Treat frames in TX queue including the ones that caused the error.
	 * Free transmit buffers in upper layer.
	 */
	for (tail = bp->tx_tail; tail != bp->tx_head; tail++) {
		struct macb_dma_desc	*desc;
		u32			ctrl;

		desc = macb_tx_desc(bp, tail);
		ctrl = desc->ctrl;
		tx_skb = macb_tx_skb(bp, tail);
		skb = tx_skb->skb;

		if (ctrl & MACB_BIT(TX_USED)) {
			/* Frame actually made it out; count it normally */
			netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
				    macb_tx_ring_wrap(tail), skb->data);
			bp->stats.tx_packets++;
			bp->stats.tx_bytes += skb->len;
		} else {
			/*
			 * "Buffers exhausted mid-frame" errors may only happen
			 * if the driver is buggy, so complain loudly about those.
			 * Statistics are updated by hardware.
			 */
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");

			/* Hand the descriptor back as "used" so the HW
			 * never retransmits it after the reset below */
			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
				 DMA_TO_DEVICE);
		tx_skb->skb = NULL;
		dev_kfree_skb(skb);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/* Reinitialize the TX desc queue */
	macb_writel(bp, TBQP, bp->tx_ring_dma);
	/* Make TX ring reflect state of hardware */
	bp->tx_head = bp->tx_tail = 0;

	/* Now we are ready to start transmission again */
	netif_wake_queue(bp->dev);

	/* Housework before enabling TX IRQ */
	macb_writel(bp, TSR, macb_readl(bp, TSR));
	macb_writel(bp, IER, MACB_TX_INT_FLAGS);
}
|
|
|
|
|
|
|
|
/*
 * TX-complete handler (called from the ISR). Walks the ring from
 * tx_tail towards tx_head, reclaiming every descriptor the hardware
 * has marked TX_USED: unmap the DMA buffer, bump stats, free the skb.
 * Wakes the queue once enough descriptors are free again.
 */
static void macb_tx_interrupt(struct macb *bp)
{
	unsigned int tail;
	unsigned int head;
	u32 status;

	/* TSR is write-one-to-clear; ack what we observed */
	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

	/* Some revisions also need the ISR bit acked explicitly */
	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		macb_writel(bp, ISR, MACB_BIT(TCOMP));

	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
		(unsigned long)status);

	head = bp->tx_head;
	for (tail = bp->tx_tail; tail != head; tail++) {
		struct macb_tx_skb	*tx_skb;
		struct sk_buff		*skb;
		struct macb_dma_desc	*desc;
		u32			ctrl;

		desc = macb_tx_desc(bp, tail);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		/* Hardware still owns this descriptor: stop reclaiming */
		if (!(ctrl & MACB_BIT(TX_USED)))
			break;

		tx_skb = macb_tx_skb(bp, tail);
		skb = tx_skb->skb;

		netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
			macb_tx_ring_wrap(tail), skb->data);
		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
				 DMA_TO_DEVICE);
		bp->stats.tx_packets++;
		bp->stats.tx_bytes += skb->len;
		tx_skb->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_tail = tail;
	/* Wake the queue only once occupancy drops below the threshold,
	 * to avoid thrashing start/stop on a nearly-full ring */
	if (netif_queue_stopped(bp->dev)
			&& CIRC_CNT(bp->tx_head, bp->tx_tail,
				    TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);
}
|
|
|
|
|
2013-06-05 05:57:12 +08:00
|
|
|
static void gem_rx_refill(struct macb *bp)
|
|
|
|
{
|
|
|
|
unsigned int entry;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
struct macb_dma_desc *desc;
|
|
|
|
dma_addr_t paddr;
|
|
|
|
|
|
|
|
while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
|
|
|
|
u32 addr, ctrl;
|
|
|
|
|
|
|
|
entry = macb_rx_ring_wrap(bp->rx_prepared_head);
|
|
|
|
desc = &bp->rx_ring[entry];
|
|
|
|
|
|
|
|
/* Make hw descriptor updates visible to CPU */
|
|
|
|
rmb();
|
|
|
|
|
|
|
|
addr = desc->addr;
|
|
|
|
ctrl = desc->ctrl;
|
|
|
|
bp->rx_prepared_head++;
|
|
|
|
|
|
|
|
if ((addr & MACB_BIT(RX_USED)))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (bp->rx_skbuff[entry] == NULL) {
|
|
|
|
/* allocate sk_buff for this free entry in ring */
|
|
|
|
skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
|
|
|
|
if (unlikely(skb == NULL)) {
|
|
|
|
netdev_err(bp->dev,
|
|
|
|
"Unable to allocate sk_buff\n");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
bp->rx_skbuff[entry] = skb;
|
|
|
|
|
|
|
|
/* now fill corresponding descriptor entry */
|
|
|
|
paddr = dma_map_single(&bp->pdev->dev, skb->data,
|
|
|
|
bp->rx_buffer_size, DMA_FROM_DEVICE);
|
|
|
|
|
|
|
|
if (entry == RX_RING_SIZE - 1)
|
|
|
|
paddr |= MACB_BIT(RX_WRAP);
|
|
|
|
bp->rx_ring[entry].addr = paddr;
|
|
|
|
bp->rx_ring[entry].ctrl = 0;
|
|
|
|
|
|
|
|
/* properly align Ethernet header */
|
|
|
|
skb_reserve(skb, NET_IP_ALIGN);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Make descriptor updates visible to hardware */
|
|
|
|
wmb();
|
|
|
|
|
|
|
|
netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
|
|
|
|
bp->rx_prepared_head, bp->rx_tail);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Mark DMA descriptors from begin up to and not including end as unused */
|
|
|
|
static void discard_partial_frame(struct macb *bp, unsigned int begin,
|
|
|
|
unsigned int end)
|
|
|
|
{
|
|
|
|
unsigned int frag;
|
|
|
|
|
|
|
|
for (frag = begin; frag != end; frag++) {
|
|
|
|
struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
|
|
|
|
desc->addr &= ~MACB_BIT(RX_USED);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Make descriptor updates visible to hardware */
|
|
|
|
wmb();
|
|
|
|
|
|
|
|
/*
|
|
|
|
* When this happens, the hardware stats registers for
|
|
|
|
* whatever caused this is updated, so we don't have to record
|
|
|
|
* anything.
|
|
|
|
*/
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * GEM RX path: each descriptor points at its own full-frame skb
 * (SOF and EOF set together). Consume up to 'budget' completed
 * descriptors, hand the skbs to the stack, then refill the ring.
 * Returns the number of frames processed (NAPI credit consumed).
 */
static int gem_rx(struct macb *bp, int budget)
{
	unsigned int		len;
	unsigned int		entry;
	struct sk_buff		*skb;
	struct macb_dma_desc	*desc;
	int			count = 0;

	while (count < budget) {
		u32 addr, ctrl;

		entry = macb_rx_ring_wrap(bp->rx_tail);
		desc = &bp->rx_ring[entry];

		/* Make hw descriptor updates visible to CPU */
		rmb();

		addr = desc->addr;
		ctrl = desc->ctrl;

		/* RX_USED set means the HW filled this descriptor */
		if (!(addr & MACB_BIT(RX_USED)))
			break;

		/* Return the descriptor to the hardware immediately;
		 * the data now lives in the skb we detach below */
		desc->addr &= ~MACB_BIT(RX_USED);
		bp->rx_tail++;
		count++;

		/* GEM delivers whole frames per descriptor; anything
		 * else indicates a hardware/driver inconsistency */
		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
			netdev_err(bp->dev,
				   "not whole frame pointed by descriptor\n");
			bp->stats.rx_dropped++;
			break;
		}
		skb = bp->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(bp->dev,
				   "inconsistent Rx descriptor chain\n");
			bp->stats.rx_dropped++;
			break;
		}
		/* now everything is ready for receiving packet */
		bp->rx_skbuff[entry] = NULL;
		len = MACB_BFEXT(RX_FRMLEN, ctrl);

		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);

		skb_put(skb, len);
		/* Recover the mapped bus address from the descriptor
		 * (low bits hold control flags, not address bits) */
		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
		dma_unmap_single(&bp->pdev->dev, addr,
				 len, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb_checksum_none_assert(skb);

		bp->stats.rx_packets++;
		bp->stats.rx_bytes += skb->len;

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
			    skb->len, skb->csum);
		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->mac_header, 16, true);
		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->data, 32, true);
#endif

		netif_receive_skb(skb);
	}

	/* Replace the skbs we consumed so the HW never starves */
	gem_rx_refill(bp);

	return count;
}
|
|
|
|
|
2006-11-09 21:51:17 +08:00
|
|
|
/*
 * MACB (non-GEM) RX path: a frame spans the fragment descriptors
 * first_frag..last_frag, each backed by a fixed-size slice of the
 * shared rx_buffers area. Copy the fragments into a freshly allocated
 * skb, return all descriptors to the hardware, and deliver the skb.
 * Returns 0 on success, 1 if the frame was dropped (allocation failure).
 */
static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
			 unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;

	/* Frame length is reported in the last fragment's descriptor */
	desc = macb_rx_desc(bp, last_frag);
	len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);

	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		macb_rx_ring_wrap(first_frag),
		macb_rx_ring_wrap(last_frag), len);

	/*
	 * The ethernet header starts NET_IP_ALIGN bytes into the
	 * first buffer. Since the header is 14 bytes, this makes the
	 * payload word-aligned.
	 *
	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
	 * the two padding bytes into the skb so that we avoid hitting
	 * the slowpath in memcpy(), and pull them off afterwards.
	 */
	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
	if (!skb) {
		/* Can't deliver: still give every descriptor back to HW */
		bp->stats.rx_dropped++;
		for (frag = first_frag; ; frag++) {
			desc = macb_rx_desc(bp, frag);
			desc->addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}

		/* Make descriptor updates visible to hardware */
		wmb();

		return 1;
	}

	offset = 0;
	len += NET_IP_ALIGN;
	skb_checksum_none_assert(skb);
	skb_put(skb, len);

	/* Gather the fragments into the linear skb */
	for (frag = first_frag; ; frag++) {
		unsigned int frag_len = bp->rx_buffer_size;

		if (offset + frag_len > len) {
			/* Partial final fragment; must be the last one */
			BUG_ON(frag != last_frag);
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
				macb_rx_buffer(bp, frag), frag_len);
		offset += bp->rx_buffer_size;
		desc = macb_rx_desc(bp, frag);
		desc->addr &= ~MACB_BIT(RX_USED);

		if (frag == last_frag)
			break;
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/* Drop the two alignment padding bytes copied above */
	__skb_pull(skb, NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->stats.rx_packets++;
	bp->stats.rx_bytes += skb->len;
	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
		   skb->len, skb->csum);
	netif_receive_skb(skb);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * MACB (non-GEM) receive poll: scan the descriptor ring for complete
 * frames (an RX_SOF fragment followed eventually by an RX_EOF fragment)
 * and hand each one to macb_rx_frame(). An SOF seen while a frame is
 * already open means the previous frame was truncated and is discarded.
 * Returns the number of frames delivered (<= budget).
 */
static int macb_rx(struct macb *bp, int budget)
{
	int received = 0;
	unsigned int tail;
	int first_frag = -1;

	for (tail = bp->rx_tail; budget > 0; tail++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
		u32 addr, ctrl;

		/* Make hw descriptor updates visible to CPU */
		rmb();

		addr = desc->addr;
		ctrl = desc->ctrl;

		/* Descriptor still owned by hardware: stop scanning */
		if (!(addr & MACB_BIT(RX_USED)))
			break;

		if (ctrl & MACB_BIT(RX_SOF)) {
			/* New frame starts; any open frame was truncated */
			if (first_frag != -1)
				discard_partial_frame(bp, first_frag, tail);
			first_frag = tail;
		}

		if (ctrl & MACB_BIT(RX_EOF)) {
			int dropped;
			/* EOF without a preceding SOF is a driver bug */
			BUG_ON(first_frag == -1);

			dropped = macb_rx_frame(bp, first_frag, tail);
			first_frag = -1;
			if (!dropped) {
				received++;
				budget--;
			}
		}
	}

	/* Resume at the start of any still-open frame next time so its
	 * remaining fragments are not lost */
	if (first_frag != -1)
		bp->rx_tail = first_frag;
	else
		bp->rx_tail = tail;

	return received;
}
|
|
|
|
|
[NET]: Make NAPI polling independent of struct net_device objects.
Several devices have multiple independant RX queues per net
device, and some have a single interrupt doorbell for several
queues.
In either case, it's easier to support layouts like that if the
structure representing the poll is independant from the net
device itself.
The signature of the ->poll() call back goes from:
int foo_poll(struct net_device *dev, int *budget)
to
int foo_poll(struct napi_struct *napi, int budget)
The caller is returned the number of RX packets processed (or
the number of "NAPI credits" consumed if you want to get
abstract). The callee no longer messes around bumping
dev->quota, *budget, etc. because that is all handled in the
caller upon return.
The napi_struct is to be embedded in the device driver private data
structures.
Furthermore, it is the driver's responsibility to disable all NAPI
instances in it's ->stop() device close handler. Since the
napi_struct is privatized into the driver's private data structures,
only the driver knows how to get at all of the napi_struct instances
it may have per-device.
With lots of help and suggestions from Rusty Russell, Roland Dreier,
Michael Chan, Jeff Garzik, and Jamal Hadi Salim.
Bug fixes from Thomas Graf, Roland Dreier, Peter Zijlstra,
Joseph Fannin, Scott Wood, Hans J. Koch, and Michael Chan.
[ Ported to current tree and all drivers converted. Integrated
Stephen's follow-on kerneldoc additions, and restored poll_list
handling to the old style to fix mutual exclusion issues. -DaveM ]
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-10-04 07:41:36 +08:00
|
|
|
static int macb_poll(struct napi_struct *napi, int budget)
|
2006-11-09 21:51:17 +08:00
|
|
|
{
|
[NET]: Make NAPI polling independent of struct net_device objects.
Several devices have multiple independant RX queues per net
device, and some have a single interrupt doorbell for several
queues.
In either case, it's easier to support layouts like that if the
structure representing the poll is independant from the net
device itself.
The signature of the ->poll() call back goes from:
int foo_poll(struct net_device *dev, int *budget)
to
int foo_poll(struct napi_struct *napi, int budget)
The caller is returned the number of RX packets processed (or
the number of "NAPI credits" consumed if you want to get
abstract). The callee no longer messes around bumping
dev->quota, *budget, etc. because that is all handled in the
caller upon return.
The napi_struct is to be embedded in the device driver private data
structures.
Furthermore, it is the driver's responsibility to disable all NAPI
instances in it's ->stop() device close handler. Since the
napi_struct is privatized into the driver's private data structures,
only the driver knows how to get at all of the napi_struct instances
it may have per-device.
With lots of help and suggestions from Rusty Russell, Roland Dreier,
Michael Chan, Jeff Garzik, and Jamal Hadi Salim.
Bug fixes from Thomas Graf, Roland Dreier, Peter Zijlstra,
Joseph Fannin, Scott Wood, Hans J. Koch, and Michael Chan.
[ Ported to current tree and all drivers converted. Integrated
Stephen's follow-on kerneldoc additions, and restored poll_list
handling to the old style to fix mutual exclusion issues. -DaveM ]
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-10-04 07:41:36 +08:00
|
|
|
struct macb *bp = container_of(napi, struct macb, napi);
|
|
|
|
int work_done;
|
2006-11-09 21:51:17 +08:00
|
|
|
u32 status;
|
|
|
|
|
|
|
|
status = macb_readl(bp, RSR);
|
|
|
|
macb_writel(bp, RSR, status);
|
|
|
|
|
[NET]: Make NAPI polling independent of struct net_device objects.
Several devices have multiple independant RX queues per net
device, and some have a single interrupt doorbell for several
queues.
In either case, it's easier to support layouts like that if the
structure representing the poll is independant from the net
device itself.
The signature of the ->poll() call back goes from:
int foo_poll(struct net_device *dev, int *budget)
to
int foo_poll(struct napi_struct *napi, int budget)
The caller is returned the number of RX packets processed (or
the number of "NAPI credits" consumed if you want to get
abstract). The callee no longer messes around bumping
dev->quota, *budget, etc. because that is all handled in the
caller upon return.
The napi_struct is to be embedded in the device driver private data
structures.
Furthermore, it is the driver's responsibility to disable all NAPI
instances in it's ->stop() device close handler. Since the
napi_struct is privatized into the driver's private data structures,
only the driver knows how to get at all of the napi_struct instances
it may have per-device.
With lots of help and suggestions from Rusty Russell, Roland Dreier,
Michael Chan, Jeff Garzik, and Jamal Hadi Salim.
Bug fixes from Thomas Graf, Roland Dreier, Peter Zijlstra,
Joseph Fannin, Scott Wood, Hans J. Koch, and Michael Chan.
[ Ported to current tree and all drivers converted. Integrated
Stephen's follow-on kerneldoc additions, and restored poll_list
handling to the old style to fix mutual exclusion issues. -DaveM ]
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-10-04 07:41:36 +08:00
|
|
|
work_done = 0;
|
2006-11-09 21:51:17 +08:00
|
|
|
|
2012-10-31 14:04:52 +08:00
|
|
|
netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
|
2011-03-09 04:27:08 +08:00
|
|
|
(unsigned long)status, budget);
|
2006-11-09 21:51:17 +08:00
|
|
|
|
2013-06-05 05:57:12 +08:00
|
|
|
work_done = bp->macbgem_ops.mog_rx(bp, budget);
|
2010-10-25 09:44:22 +08:00
|
|
|
if (work_done < budget) {
|
2009-01-20 08:43:59 +08:00
|
|
|
napi_complete(napi);
|
2006-11-09 21:51:17 +08:00
|
|
|
|
2010-10-25 09:44:22 +08:00
|
|
|
/*
|
|
|
|
* We've done what we can to clean the buffers. Make sure we
|
|
|
|
* get notified when new packets arrive.
|
|
|
|
*/
|
|
|
|
macb_writel(bp, IER, MACB_RX_INT_FLAGS);
|
2013-02-12 18:08:48 +08:00
|
|
|
|
|
|
|
/* Packets received while interrupts were disabled */
|
|
|
|
status = macb_readl(bp, RSR);
|
|
|
|
if (unlikely(status))
|
|
|
|
napi_reschedule(napi);
|
2010-10-25 09:44:22 +08:00
|
|
|
}
|
2006-11-09 21:51:17 +08:00
|
|
|
|
|
|
|
/* TODO: Handle errors */
|
|
|
|
|
[NET]: Make NAPI polling independent of struct net_device objects.
Several devices have multiple independant RX queues per net
device, and some have a single interrupt doorbell for several
queues.
In either case, it's easier to support layouts like that if the
structure representing the poll is independant from the net
device itself.
The signature of the ->poll() call back goes from:
int foo_poll(struct net_device *dev, int *budget)
to
int foo_poll(struct napi_struct *napi, int budget)
The caller is returned the number of RX packets processed (or
the number of "NAPI credits" consumed if you want to get
abstract). The callee no longer messes around bumping
dev->quota, *budget, etc. because that is all handled in the
caller upon return.
The napi_struct is to be embedded in the device driver private data
structures.
Furthermore, it is the driver's responsibility to disable all NAPI
instances in it's ->stop() device close handler. Since the
napi_struct is privatized into the driver's private data structures,
only the driver knows how to get at all of the napi_struct instances
it may have per-device.
With lots of help and suggestions from Rusty Russell, Roland Dreier,
Michael Chan, Jeff Garzik, and Jamal Hadi Salim.
Bug fixes from Thomas Graf, Roland Dreier, Peter Zijlstra,
Joseph Fannin, Scott Wood, Hans J. Koch, and Michael Chan.
[ Ported to current tree and all drivers converted. Integrated
Stephen's follow-on kerneldoc additions, and restored poll_list
handling to the old style to fix mutual exclusion issues. -DaveM ]
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-10-04 07:41:36 +08:00
|
|
|
return work_done;
|
2006-11-09 21:51:17 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static irqreturn_t macb_interrupt(int irq, void *dev_id)
|
|
|
|
{
|
|
|
|
struct net_device *dev = dev_id;
|
|
|
|
struct macb *bp = netdev_priv(dev);
|
|
|
|
u32 status;
|
|
|
|
|
|
|
|
status = macb_readl(bp, ISR);
|
|
|
|
|
|
|
|
if (unlikely(!status))
|
|
|
|
return IRQ_NONE;
|
|
|
|
|
|
|
|
spin_lock(&bp->lock);
|
|
|
|
|
|
|
|
while (status) {
|
|
|
|
/* close possible race with dev_close */
|
|
|
|
if (unlikely(!netif_running(dev))) {
|
2012-10-22 16:45:31 +08:00
|
|
|
macb_writel(bp, IDR, -1);
|
2006-11-09 21:51:17 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2012-10-31 14:04:52 +08:00
|
|
|
netdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status);
|
|
|
|
|
2006-11-09 21:51:17 +08:00
|
|
|
if (status & MACB_RX_INT_FLAGS) {
|
2010-10-25 09:44:22 +08:00
|
|
|
/*
|
|
|
|
* There's no point taking any more interrupts
|
|
|
|
* until we have processed the buffers. The
|
|
|
|
* scheduling call may fail if the poll routine
|
|
|
|
* is already scheduled, so disable interrupts
|
|
|
|
* now.
|
|
|
|
*/
|
|
|
|
macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
|
2013-05-14 11:00:16 +08:00
|
|
|
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
|
|
|
|
macb_writel(bp, ISR, MACB_BIT(RCOMP));
|
2010-10-25 09:44:22 +08:00
|
|
|
|
2009-01-20 08:43:59 +08:00
|
|
|
if (napi_schedule_prep(&bp->napi)) {
|
2012-10-31 14:04:52 +08:00
|
|
|
netdev_vdbg(bp->dev, "scheduling RX softirq\n");
|
2009-01-20 08:43:59 +08:00
|
|
|
__napi_schedule(&bp->napi);
|
2006-11-09 21:51:17 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-10-31 14:04:57 +08:00
|
|
|
if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
|
|
|
|
macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
|
|
|
|
schedule_work(&bp->tx_error_task);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (status & MACB_BIT(TCOMP))
|
|
|
|
macb_tx_interrupt(bp);
|
2006-11-09 21:51:17 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Link change detection isn't possible with RMII, so we'll
|
|
|
|
* add that if/when we get our hands on a full-blown MII PHY.
|
|
|
|
*/
|
|
|
|
|
2011-04-13 13:03:24 +08:00
|
|
|
if (status & MACB_BIT(ISR_ROVR)) {
|
|
|
|
/* We missed at least one packet */
|
2011-11-08 18:12:32 +08:00
|
|
|
if (macb_is_gem(bp))
|
|
|
|
bp->hw_stats.gem.rx_overruns++;
|
|
|
|
else
|
|
|
|
bp->hw_stats.macb.rx_overruns++;
|
2011-04-13 13:03:24 +08:00
|
|
|
}
|
|
|
|
|
2006-11-09 21:51:17 +08:00
|
|
|
if (status & MACB_BIT(HRESP)) {
|
|
|
|
/*
|
2011-03-09 04:27:08 +08:00
|
|
|
* TODO: Reset the hardware, and maybe move the
|
|
|
|
* netdev_err to a lower-priority context as well
|
|
|
|
* (work queue?)
|
2006-11-09 21:51:17 +08:00
|
|
|
*/
|
2011-03-09 04:27:08 +08:00
|
|
|
netdev_err(dev, "DMA bus error: HRESP not OK\n");
|
2006-11-09 21:51:17 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
status = macb_readl(bp, ISR);
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_unlock(&bp->lock);
|
|
|
|
|
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
|
2009-05-05 02:08:41 +08:00
|
|
|
#ifdef CONFIG_NET_POLL_CONTROLLER
|
|
|
|
/*
|
|
|
|
* Polling receive - used by netconsole and other diagnostic tools
|
|
|
|
* to allow network i/o with interrupts disabled.
|
|
|
|
*/
|
|
|
|
static void macb_poll_controller(struct net_device *dev)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
local_irq_save(flags);
|
|
|
|
macb_interrupt(dev->irq, dev);
|
|
|
|
local_irq_restore(flags);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2006-11-09 21:51:17 +08:00
|
|
|
static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct macb *bp = netdev_priv(dev);
|
|
|
|
dma_addr_t mapping;
|
|
|
|
unsigned int len, entry;
|
2012-10-31 14:04:55 +08:00
|
|
|
struct macb_dma_desc *desc;
|
|
|
|
struct macb_tx_skb *tx_skb;
|
2006-11-09 21:51:17 +08:00
|
|
|
u32 ctrl;
|
2009-08-24 10:49:07 +08:00
|
|
|
unsigned long flags;
|
2006-11-09 21:51:17 +08:00
|
|
|
|
2012-10-31 14:04:52 +08:00
|
|
|
#if defined(DEBUG) && defined(VERBOSE_DEBUG)
|
|
|
|
netdev_vdbg(bp->dev,
|
2011-03-09 04:27:08 +08:00
|
|
|
"start_xmit: len %u head %p data %p tail %p end %p\n",
|
|
|
|
skb->len, skb->head, skb->data,
|
|
|
|
skb_tail_pointer(skb), skb_end_pointer(skb));
|
|
|
|
print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
|
|
|
|
skb->data, 16, true);
|
2006-11-09 21:51:17 +08:00
|
|
|
#endif
|
|
|
|
|
|
|
|
len = skb->len;
|
2009-08-24 10:49:07 +08:00
|
|
|
spin_lock_irqsave(&bp->lock, flags);
|
2006-11-09 21:51:17 +08:00
|
|
|
|
|
|
|
/* This is a hard error, log it. */
|
2012-11-19 14:00:21 +08:00
|
|
|
if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) {
|
2006-11-09 21:51:17 +08:00
|
|
|
netif_stop_queue(dev);
|
2009-08-24 10:49:07 +08:00
|
|
|
spin_unlock_irqrestore(&bp->lock, flags);
|
2011-03-09 04:27:08 +08:00
|
|
|
netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
|
|
|
|
netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
|
|
|
|
bp->tx_head, bp->tx_tail);
|
2009-06-12 14:22:29 +08:00
|
|
|
return NETDEV_TX_BUSY;
|
2006-11-09 21:51:17 +08:00
|
|
|
}
|
|
|
|
|
2012-10-31 14:04:55 +08:00
|
|
|
entry = macb_tx_ring_wrap(bp->tx_head);
|
|
|
|
bp->tx_head++;
|
2012-10-31 14:04:52 +08:00
|
|
|
netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
|
2006-11-09 21:51:17 +08:00
|
|
|
mapping = dma_map_single(&bp->pdev->dev, skb->data,
|
|
|
|
len, DMA_TO_DEVICE);
|
2012-10-31 14:04:55 +08:00
|
|
|
|
|
|
|
tx_skb = &bp->tx_skb[entry];
|
|
|
|
tx_skb->skb = skb;
|
|
|
|
tx_skb->mapping = mapping;
|
2012-10-31 14:04:52 +08:00
|
|
|
netdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
|
2011-03-09 04:27:08 +08:00
|
|
|
skb->data, (unsigned long)mapping);
|
2006-11-09 21:51:17 +08:00
|
|
|
|
|
|
|
ctrl = MACB_BF(TX_FRMLEN, len);
|
|
|
|
ctrl |= MACB_BIT(TX_LAST);
|
|
|
|
if (entry == (TX_RING_SIZE - 1))
|
|
|
|
ctrl |= MACB_BIT(TX_WRAP);
|
|
|
|
|
2012-10-31 14:04:55 +08:00
|
|
|
desc = &bp->tx_ring[entry];
|
|
|
|
desc->addr = mapping;
|
|
|
|
desc->ctrl = ctrl;
|
2012-10-31 14:04:51 +08:00
|
|
|
|
|
|
|
/* Make newly initialized descriptor visible to hardware */
|
2006-11-09 21:51:17 +08:00
|
|
|
wmb();
|
|
|
|
|
2011-06-20 05:51:28 +08:00
|
|
|
skb_tx_timestamp(skb);
|
|
|
|
|
2006-11-09 21:51:17 +08:00
|
|
|
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
|
|
|
|
|
2012-11-19 14:00:21 +08:00
|
|
|
if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
|
2006-11-09 21:51:17 +08:00
|
|
|
netif_stop_queue(dev);
|
|
|
|
|
2009-08-24 10:49:07 +08:00
|
|
|
spin_unlock_irqrestore(&bp->lock, flags);
|
2006-11-09 21:51:17 +08:00
|
|
|
|
2009-06-23 14:03:08 +08:00
|
|
|
return NETDEV_TX_OK;
|
2006-11-09 21:51:17 +08:00
|
|
|
}
|
|
|
|
|
2013-06-05 05:57:12 +08:00
|
|
|
/*
 * Choose the per-descriptor RX buffer size. MACB uses a fixed size;
 * GEM takes the MTU-derived @size, rounded up to the hardware's
 * RX_BUFFER_MULTIPLE granularity.
 *
 * Fix: use the standard %zu printk specifier for size_t instead of the
 * deprecated GNU-only %Zu form.
 */
static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
{
	if (!macb_is_gem(bp)) {
		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
	} else {
		bp->rx_buffer_size = size;

		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
			netdev_dbg(bp->dev,
				   "RX buffer must be multiple of %d bytes, expanding\n",
				   RX_BUFFER_MULTIPLE);
			bp->rx_buffer_size =
				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
		}
	}

	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
		   bp->dev->mtu, bp->rx_buffer_size);
}
|
|
|
|
|
2013-06-05 05:57:12 +08:00
|
|
|
static void gem_free_rx_buffers(struct macb *bp)
|
|
|
|
{
|
|
|
|
struct sk_buff *skb;
|
|
|
|
struct macb_dma_desc *desc;
|
|
|
|
dma_addr_t addr;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (!bp->rx_skbuff)
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (i = 0; i < RX_RING_SIZE; i++) {
|
|
|
|
skb = bp->rx_skbuff[i];
|
|
|
|
|
|
|
|
if (skb == NULL)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
desc = &bp->rx_ring[i];
|
|
|
|
addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
|
|
|
|
dma_unmap_single(&bp->pdev->dev, addr, skb->len,
|
|
|
|
DMA_FROM_DEVICE);
|
|
|
|
dev_kfree_skb_any(skb);
|
|
|
|
skb = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
kfree(bp->rx_skbuff);
|
|
|
|
bp->rx_skbuff = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void macb_free_rx_buffers(struct macb *bp)
|
|
|
|
{
|
|
|
|
if (bp->rx_buffers) {
|
|
|
|
dma_free_coherent(&bp->pdev->dev,
|
|
|
|
RX_RING_SIZE * bp->rx_buffer_size,
|
|
|
|
bp->rx_buffers, bp->rx_buffers_dma);
|
|
|
|
bp->rx_buffers = NULL;
|
|
|
|
}
|
|
|
|
}
|
2013-06-05 05:57:11 +08:00
|
|
|
|
2006-11-09 21:51:17 +08:00
|
|
|
static void macb_free_consistent(struct macb *bp)
|
|
|
|
{
|
|
|
|
if (bp->tx_skb) {
|
|
|
|
kfree(bp->tx_skb);
|
|
|
|
bp->tx_skb = NULL;
|
|
|
|
}
|
2013-06-05 05:57:12 +08:00
|
|
|
bp->macbgem_ops.mog_free_rx_buffers(bp);
|
2006-11-09 21:51:17 +08:00
|
|
|
if (bp->rx_ring) {
|
|
|
|
dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
|
|
|
|
bp->rx_ring, bp->rx_ring_dma);
|
|
|
|
bp->rx_ring = NULL;
|
|
|
|
}
|
|
|
|
if (bp->tx_ring) {
|
|
|
|
dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
|
|
|
|
bp->tx_ring, bp->tx_ring_dma);
|
|
|
|
bp->tx_ring = NULL;
|
|
|
|
}
|
2013-06-05 05:57:12 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int gem_alloc_rx_buffers(struct macb *bp)
|
|
|
|
{
|
|
|
|
int size;
|
|
|
|
|
|
|
|
size = RX_RING_SIZE * sizeof(struct sk_buff *);
|
|
|
|
bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
|
|
|
|
if (!bp->rx_skbuff)
|
|
|
|
return -ENOMEM;
|
|
|
|
else
|
|
|
|
netdev_dbg(bp->dev,
|
|
|
|
"Allocated %d RX struct sk_buff entries at %p\n",
|
|
|
|
RX_RING_SIZE, bp->rx_skbuff);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int macb_alloc_rx_buffers(struct macb *bp)
|
|
|
|
{
|
|
|
|
int size;
|
|
|
|
|
|
|
|
size = RX_RING_SIZE * bp->rx_buffer_size;
|
|
|
|
bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
|
|
|
|
&bp->rx_buffers_dma, GFP_KERNEL);
|
|
|
|
if (!bp->rx_buffers)
|
|
|
|
return -ENOMEM;
|
|
|
|
else
|
|
|
|
netdev_dbg(bp->dev,
|
|
|
|
"Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
|
|
|
|
size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
|
|
|
|
return 0;
|
2006-11-09 21:51:17 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int macb_alloc_consistent(struct macb *bp)
|
|
|
|
{
|
|
|
|
int size;
|
|
|
|
|
2012-10-31 14:04:55 +08:00
|
|
|
size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
|
2006-11-09 21:51:17 +08:00
|
|
|
bp->tx_skb = kmalloc(size, GFP_KERNEL);
|
|
|
|
if (!bp->tx_skb)
|
|
|
|
goto out_err;
|
|
|
|
|
|
|
|
size = RX_RING_BYTES;
|
|
|
|
bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
|
|
|
|
&bp->rx_ring_dma, GFP_KERNEL);
|
|
|
|
if (!bp->rx_ring)
|
|
|
|
goto out_err;
|
2011-03-09 04:27:08 +08:00
|
|
|
netdev_dbg(bp->dev,
|
|
|
|
"Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
|
|
|
|
size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
|
2006-11-09 21:51:17 +08:00
|
|
|
|
|
|
|
size = TX_RING_BYTES;
|
|
|
|
bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
|
|
|
|
&bp->tx_ring_dma, GFP_KERNEL);
|
|
|
|
if (!bp->tx_ring)
|
|
|
|
goto out_err;
|
2011-03-09 04:27:08 +08:00
|
|
|
netdev_dbg(bp->dev,
|
|
|
|
"Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
|
|
|
|
size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
|
2006-11-09 21:51:17 +08:00
|
|
|
|
2013-06-05 05:57:12 +08:00
|
|
|
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
|
2006-11-09 21:51:17 +08:00
|
|
|
goto out_err;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
out_err:
|
|
|
|
macb_free_consistent(bp);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2013-06-05 05:57:12 +08:00
|
|
|
static void gem_init_rings(struct macb *bp)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < TX_RING_SIZE; i++) {
|
|
|
|
bp->tx_ring[i].addr = 0;
|
|
|
|
bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
|
|
|
|
}
|
|
|
|
bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
|
|
|
|
|
|
|
|
bp->rx_tail = bp->rx_prepared_head = bp->tx_head = bp->tx_tail = 0;
|
|
|
|
|
|
|
|
gem_rx_refill(bp);
|
|
|
|
}
|
|
|
|
|
2006-11-09 21:51:17 +08:00
|
|
|
static void macb_init_rings(struct macb *bp)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
dma_addr_t addr;
|
|
|
|
|
|
|
|
addr = bp->rx_buffers_dma;
|
|
|
|
for (i = 0; i < RX_RING_SIZE; i++) {
|
|
|
|
bp->rx_ring[i].addr = addr;
|
|
|
|
bp->rx_ring[i].ctrl = 0;
|
2013-06-05 05:57:11 +08:00
|
|
|
addr += bp->rx_buffer_size;
|
2006-11-09 21:51:17 +08:00
|
|
|
}
|
|
|
|
bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
|
|
|
|
|
|
|
|
for (i = 0; i < TX_RING_SIZE; i++) {
|
|
|
|
bp->tx_ring[i].addr = 0;
|
|
|
|
bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
|
|
|
|
}
|
|
|
|
bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
|
|
|
|
|
|
|
|
bp->rx_tail = bp->tx_head = bp->tx_tail = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void macb_reset_hw(struct macb *bp)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Disable RX and TX (XXX: Should we halt the transmission
|
|
|
|
* more gracefully?)
|
|
|
|
*/
|
|
|
|
macb_writel(bp, NCR, 0);
|
|
|
|
|
|
|
|
/* Clear the stats registers (XXX: Update stats first?) */
|
|
|
|
macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
|
|
|
|
|
|
|
|
/* Clear all status flags */
|
2012-10-22 16:45:31 +08:00
|
|
|
macb_writel(bp, TSR, -1);
|
|
|
|
macb_writel(bp, RSR, -1);
|
2006-11-09 21:51:17 +08:00
|
|
|
|
|
|
|
/* Disable all interrupts */
|
2012-10-22 16:45:31 +08:00
|
|
|
macb_writel(bp, IDR, -1);
|
2006-11-09 21:51:17 +08:00
|
|
|
macb_readl(bp, ISR);
|
|
|
|
}
|
|
|
|
|
2011-03-10 00:22:54 +08:00
|
|
|
static u32 gem_mdc_clk_div(struct macb *bp)
|
|
|
|
{
|
|
|
|
u32 config;
|
|
|
|
unsigned long pclk_hz = clk_get_rate(bp->pclk);
|
|
|
|
|
|
|
|
if (pclk_hz <= 20000000)
|
|
|
|
config = GEM_BF(CLK, GEM_CLK_DIV8);
|
|
|
|
else if (pclk_hz <= 40000000)
|
|
|
|
config = GEM_BF(CLK, GEM_CLK_DIV16);
|
|
|
|
else if (pclk_hz <= 80000000)
|
|
|
|
config = GEM_BF(CLK, GEM_CLK_DIV32);
|
|
|
|
else if (pclk_hz <= 120000000)
|
|
|
|
config = GEM_BF(CLK, GEM_CLK_DIV48);
|
|
|
|
else if (pclk_hz <= 160000000)
|
|
|
|
config = GEM_BF(CLK, GEM_CLK_DIV64);
|
|
|
|
else
|
|
|
|
config = GEM_BF(CLK, GEM_CLK_DIV96);
|
|
|
|
|
|
|
|
return config;
|
|
|
|
}
|
|
|
|
|
|
|
|
static u32 macb_mdc_clk_div(struct macb *bp)
|
|
|
|
{
|
|
|
|
u32 config;
|
|
|
|
unsigned long pclk_hz;
|
|
|
|
|
|
|
|
if (macb_is_gem(bp))
|
|
|
|
return gem_mdc_clk_div(bp);
|
|
|
|
|
|
|
|
pclk_hz = clk_get_rate(bp->pclk);
|
|
|
|
if (pclk_hz <= 20000000)
|
|
|
|
config = MACB_BF(CLK, MACB_CLK_DIV8);
|
|
|
|
else if (pclk_hz <= 40000000)
|
|
|
|
config = MACB_BF(CLK, MACB_CLK_DIV16);
|
|
|
|
else if (pclk_hz <= 80000000)
|
|
|
|
config = MACB_BF(CLK, MACB_CLK_DIV32);
|
|
|
|
else
|
|
|
|
config = MACB_BF(CLK, MACB_CLK_DIV64);
|
|
|
|
|
|
|
|
return config;
|
|
|
|
}
|
|
|
|
|
2011-03-10 00:29:59 +08:00
|
|
|
/*
|
|
|
|
* Get the DMA bus width field of the network configuration register that we
|
|
|
|
* should program. We find the width from decoding the design configuration
|
|
|
|
* register to find the maximum supported data bus width.
|
|
|
|
*/
|
|
|
|
static u32 macb_dbw(struct macb *bp)
|
|
|
|
{
|
|
|
|
if (!macb_is_gem(bp))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
|
|
|
|
case 4:
|
|
|
|
return GEM_BF(DBW, GEM_DBW128);
|
|
|
|
case 2:
|
|
|
|
return GEM_BF(DBW, GEM_DBW64);
|
|
|
|
case 1:
|
|
|
|
default:
|
|
|
|
return GEM_BF(DBW, GEM_DBW32);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-03-15 01:38:30 +08:00
|
|
|
/*
|
2012-11-23 11:49:01 +08:00
|
|
|
* Configure the receive DMA engine
|
|
|
|
* - use the correct receive buffer size
|
|
|
|
* - set the possibility to use INCR16 bursts
|
|
|
|
* (if not supported by FIFO, it will fallback to default)
|
|
|
|
* - set both rx/tx packet buffers to full memory size
|
|
|
|
* These are configurable parameters for GEM.
|
2011-03-15 01:38:30 +08:00
|
|
|
*/
|
|
|
|
static void macb_configure_dma(struct macb *bp)
|
|
|
|
{
|
|
|
|
u32 dmacfg;
|
|
|
|
|
|
|
|
if (macb_is_gem(bp)) {
|
|
|
|
dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
|
2013-06-05 05:57:11 +08:00
|
|
|
dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
|
2012-11-23 11:49:01 +08:00
|
|
|
dmacfg |= GEM_BF(FBLDO, 16);
|
|
|
|
dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
|
2013-03-28 07:07:06 +08:00
|
|
|
dmacfg &= ~GEM_BIT(ENDIA);
|
2011-03-15 01:38:30 +08:00
|
|
|
gem_writel(bp, DMACFG, dmacfg);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-05-14 11:00:16 +08:00
|
|
|
/*
|
|
|
|
* Configure peripheral capacities according to integration options used
|
|
|
|
*/
|
|
|
|
static void macb_configure_caps(struct macb *bp)
|
|
|
|
{
|
|
|
|
if (macb_is_gem(bp)) {
|
2013-07-09 16:36:00 +08:00
|
|
|
if (GEM_BFEXT(IRQCOR, gem_readl(bp, DCFG1)) == 0)
|
2013-05-14 11:00:16 +08:00
|
|
|
bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-11-09 21:51:17 +08:00
|
|
|
static void macb_init_hw(struct macb *bp)
|
|
|
|
{
|
|
|
|
u32 config;
|
|
|
|
|
|
|
|
macb_reset_hw(bp);
|
2012-11-07 16:14:52 +08:00
|
|
|
macb_set_hwaddr(bp);
|
2006-11-09 21:51:17 +08:00
|
|
|
|
2011-03-10 00:22:54 +08:00
|
|
|
config = macb_mdc_clk_div(bp);
|
2012-10-31 14:04:58 +08:00
|
|
|
config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
|
2006-11-09 21:51:17 +08:00
|
|
|
config |= MACB_BIT(PAE); /* PAuse Enable */
|
|
|
|
config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
|
2010-04-08 12:53:41 +08:00
|
|
|
config |= MACB_BIT(BIG); /* Receive oversized frames */
|
2006-11-09 21:51:17 +08:00
|
|
|
if (bp->dev->flags & IFF_PROMISC)
|
|
|
|
config |= MACB_BIT(CAF); /* Copy All Frames */
|
|
|
|
if (!(bp->dev->flags & IFF_BROADCAST))
|
|
|
|
config |= MACB_BIT(NBC); /* No BroadCast */
|
2011-03-10 00:29:59 +08:00
|
|
|
config |= macb_dbw(bp);
|
2006-11-09 21:51:17 +08:00
|
|
|
macb_writel(bp, NCFGR, config);
|
2012-11-02 15:09:24 +08:00
|
|
|
bp->speed = SPEED_10;
|
|
|
|
bp->duplex = DUPLEX_HALF;
|
2006-11-09 21:51:17 +08:00
|
|
|
|
2011-03-15 01:38:30 +08:00
|
|
|
macb_configure_dma(bp);
|
2013-05-14 11:00:16 +08:00
|
|
|
macb_configure_caps(bp);
|
2011-03-15 01:38:30 +08:00
|
|
|
|
2006-11-09 21:51:17 +08:00
|
|
|
/* Initialize TX and RX buffers */
|
|
|
|
macb_writel(bp, RBQP, bp->rx_ring_dma);
|
|
|
|
macb_writel(bp, TBQP, bp->tx_ring_dma);
|
|
|
|
|
|
|
|
/* Enable TX and RX */
|
2007-07-13 01:07:24 +08:00
|
|
|
macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
|
2006-11-09 21:51:17 +08:00
|
|
|
|
|
|
|
/* Enable interrupts */
|
2012-10-31 14:04:57 +08:00
|
|
|
macb_writel(bp, IER, (MACB_RX_INT_FLAGS
|
|
|
|
| MACB_TX_INT_FLAGS
|
2006-11-09 21:51:17 +08:00
|
|
|
| MACB_BIT(HRESP)));
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2007-07-13 01:07:25 +08:00
|
|
|
/*
|
|
|
|
* The hash address register is 64 bits long and takes up two
|
|
|
|
* locations in the memory map. The least significant bits are stored
|
|
|
|
* in EMAC_HSL and the most significant bits in EMAC_HSH.
|
|
|
|
*
|
|
|
|
* The unicast hash enable and the multicast hash enable bits in the
|
|
|
|
* network configuration register enable the reception of hash matched
|
|
|
|
* frames. The destination address is reduced to a 6 bit index into
|
|
|
|
* the 64 bit hash register using the following hash function. The
|
|
|
|
* hash function is an exclusive or of every sixth bit of the
|
|
|
|
* destination address.
|
|
|
|
*
|
|
|
|
* hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
|
|
|
|
* hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
|
|
|
|
* hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
|
|
|
|
* hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
|
|
|
|
* hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
|
|
|
|
* hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
|
|
|
|
*
|
|
|
|
* da[0] represents the least significant bit of the first byte
|
|
|
|
* received, that is, the multicast/unicast indicator, and da[47]
|
|
|
|
* represents the most significant bit of the last byte received. If
|
|
|
|
* the hash index, hi[n], points to a bit that is set in the hash
|
|
|
|
* register then the frame will be matched according to whether the
|
|
|
|
* frame is multicast or unicast. A multicast match will be signalled
|
|
|
|
* if the multicast hash enable bit is set, da[0] is 1 and the hash
|
|
|
|
* index points to a bit set in the hash register. A unicast match
|
|
|
|
* will be signalled if the unicast hash enable bit is set, da[0] is 0
|
|
|
|
* and the hash index points to a bit set in the hash register. To
|
|
|
|
* receive all multicast frames, the hash register should be set with
|
|
|
|
* all ones and the multicast hash enable bit should be set in the
|
|
|
|
* network configuration register.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* Return bit @bitnr (LSB-first across bytes) of the address as 0 or 1. */
static inline int hash_bit_value(int bitnr, __u8 *addr)
{
	return (addr[bitnr / 8] >> (bitnr % 8)) & 1;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return the hash index value for the specified address.
|
|
|
|
*/
|
|
|
|
/*
 * Return the 6-bit hash index for @addr: bit j of the index is the XOR
 * of every sixth destination-address bit starting at j (see the hash
 * function description above).
 */
static int hash_get_index(__u8 *addr)
{
	int index = 0;
	int col, bit;

	for (col = 0; col < 6; col++) {
		int parity = 0;

		for (bit = 0; bit < 8; bit++)
			parity ^= hash_bit_value(bit * 6 + col, addr);

		index |= parity << col;
	}

	return index;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Add multicast addresses to the internal multicast-hash table.
|
|
|
|
*/
|
|
|
|
static void macb_sethashtable(struct net_device *dev)
|
|
|
|
{
|
2010-04-02 05:22:57 +08:00
|
|
|
struct netdev_hw_addr *ha;
|
2007-07-13 01:07:25 +08:00
|
|
|
unsigned long mc_filter[2];
|
2010-02-23 17:19:49 +08:00
|
|
|
unsigned int bitnr;
|
2007-07-13 01:07:25 +08:00
|
|
|
struct macb *bp = netdev_priv(dev);
|
|
|
|
|
|
|
|
mc_filter[0] = mc_filter[1] = 0;
|
|
|
|
|
2010-04-02 05:22:57 +08:00
|
|
|
netdev_for_each_mc_addr(ha, dev) {
|
|
|
|
bitnr = hash_get_index(ha->addr);
|
2007-07-13 01:07:25 +08:00
|
|
|
mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
|
|
|
|
}
|
|
|
|
|
2011-11-08 18:12:32 +08:00
|
|
|
macb_or_gem_writel(bp, HRB, mc_filter[0]);
|
|
|
|
macb_or_gem_writel(bp, HRT, mc_filter[1]);
|
2007-07-13 01:07:25 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Enable/Disable promiscuous and multicast modes.
|
|
|
|
*/
|
2012-10-18 19:01:15 +08:00
|
|
|
void macb_set_rx_mode(struct net_device *dev)
|
2007-07-13 01:07:25 +08:00
|
|
|
{
|
|
|
|
unsigned long cfg;
|
|
|
|
struct macb *bp = netdev_priv(dev);
|
|
|
|
|
|
|
|
cfg = macb_readl(bp, NCFGR);
|
|
|
|
|
|
|
|
if (dev->flags & IFF_PROMISC)
|
|
|
|
/* Enable promiscuous mode */
|
|
|
|
cfg |= MACB_BIT(CAF);
|
|
|
|
else if (dev->flags & (~IFF_PROMISC))
|
|
|
|
/* Disable promiscuous mode */
|
|
|
|
cfg &= ~MACB_BIT(CAF);
|
|
|
|
|
|
|
|
if (dev->flags & IFF_ALLMULTI) {
|
|
|
|
/* Enable all multicast mode */
|
2011-11-08 18:12:32 +08:00
|
|
|
macb_or_gem_writel(bp, HRB, -1);
|
|
|
|
macb_or_gem_writel(bp, HRT, -1);
|
2007-07-13 01:07:25 +08:00
|
|
|
cfg |= MACB_BIT(NCFGR_MTI);
|
2010-02-08 12:30:35 +08:00
|
|
|
} else if (!netdev_mc_empty(dev)) {
|
2007-07-13 01:07:25 +08:00
|
|
|
/* Enable specific multicasts */
|
|
|
|
macb_sethashtable(dev);
|
|
|
|
cfg |= MACB_BIT(NCFGR_MTI);
|
|
|
|
} else if (dev->flags & (~IFF_ALLMULTI)) {
|
|
|
|
/* Disable all multicast mode */
|
2011-11-08 18:12:32 +08:00
|
|
|
macb_or_gem_writel(bp, HRB, 0);
|
|
|
|
macb_or_gem_writel(bp, HRT, 0);
|
2007-07-13 01:07:25 +08:00
|
|
|
cfg &= ~MACB_BIT(NCFGR_MTI);
|
|
|
|
}
|
|
|
|
|
|
|
|
macb_writel(bp, NCFGR, cfg);
|
|
|
|
}
|
2012-10-18 19:01:15 +08:00
|
|
|
EXPORT_SYMBOL_GPL(macb_set_rx_mode);
|
2007-07-13 01:07:25 +08:00
|
|
|
|
2006-11-09 21:51:17 +08:00
|
|
|
/*
 * macb_open - .ndo_open: bring the interface up.
 *
 * Ordering matters: size the RX buffers from the current MTU, allocate the
 * DMA-coherent descriptor rings, enable NAPI, initialize the rings and the
 * MAC hardware, then start the PHY and the TX queue.
 *
 * Returns 0 on success, -EAGAIN if the PHY has not been attached yet
 * (the core will retry), or the error from macb_alloc_consistent().
 */
static int macb_open(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	/* worst-case frame: MTU + Ethernet header + FCS + alignment pad */
	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
	int err;

	netdev_dbg(bp->dev, "open\n");

	/* carrier starts down; the PHY link handler will raise it */
	netif_carrier_off(dev);

	/* if the phy is not yet registered, retry later */
	if (!bp->phy_dev)
		return -EAGAIN;

	/* RX buffers initialization (per-adapter buffer sizing) */
	macb_init_rx_buffer_size(bp, bufsz);

	err = macb_alloc_consistent(bp);
	if (err) {
		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
			   err);
		return err;
	}

	/* NAPI must be live before the hardware can raise RX interrupts */
	napi_enable(&bp->napi);

	/* ring setup is MACB- or GEM-specific; dispatched via macbgem_ops */
	bp->macbgem_ops.mog_init_rings(bp);
	macb_init_hw(bp);

	/* schedule a link state check */
	phy_start(bp->phy_dev);

	netif_start_queue(dev);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * macb_close - .ndo_stop: bring the interface down.
 *
 * Reverse of macb_open(): stop the queue and NAPI first so no new work is
 * queued, stop the PHY, reset the MAC under bp->lock (serializes against
 * the interrupt handler), then free the DMA rings.  Always returns 0.
 */
static int macb_close(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);
	napi_disable(&bp->napi);

	/* phy_dev may be NULL if open failed before attach — guard it */
	if (bp->phy_dev)
		phy_stop(bp->phy_dev);

	spin_lock_irqsave(&bp->lock, flags);
	macb_reset_hw(bp);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&bp->lock, flags);

	/* safe only after the hardware is quiesced above */
	macb_free_consistent(bp);

	return 0;
}
|
|
|
|
|
2011-03-10 00:26:35 +08:00
|
|
|
/*
 * gem_update_stats - accumulate GEM hardware statistics into bp->hw_stats.
 *
 * Walks the GEM statistics register bank starting at GEM_OTX and adds each
 * counter into the corresponding u32 field of bp->hw_stats.gem.
 *
 * NOTE(review): this relies on struct gem_stats having its members laid
 * out in exactly the same order (and with no padding) as the hardware
 * register bank from tx_octets_31_0 through rx_udp_checksum_errors —
 * confirm against the register map before reordering fields.
 */
static void gem_update_stats(struct macb *bp)
{
	u32 __iomem *reg = bp->regs + GEM_OTX;
	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
	/* one past the last counter field */
	u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1;

	for (; p < end; p++, reg++)
		*p += __raw_readl(reg);
}
|
|
|
|
|
|
|
|
/*
 * gem_get_stats - fill bp->stats from the GEM hardware counters.
 *
 * Refreshes the accumulated hardware statistics, then maps the GEM
 * counter set onto the generic struct net_device_stats fields.
 * Returns a pointer to bp->stats for the caller (macb_get_stats).
 */
static struct net_device_stats *gem_get_stats(struct macb *bp)
{
	struct gem_stats *hwstat = &bp->hw_stats.gem;
	struct net_device_stats *nstat = &bp->stats;

	/* pull the latest values out of the hardware register bank */
	gem_update_stats(bp);

	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
			    hwstat->rx_alignment_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_frames +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersized_frames +
			    hwstat->rx_length_field_frame_errors);
	nstat->tx_errors = (hwstat->tx_late_collisions +
			    hwstat->tx_excessive_collisions +
			    hwstat->tx_underrun +
			    hwstat->tx_carrier_sense_errors);
	nstat->multicast = hwstat->rx_multicast_frames;
	nstat->collisions = (hwstat->tx_single_collision_frames +
			     hwstat->tx_multiple_collision_frames +
			     hwstat->tx_excessive_collisions);
	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersized_frames +
				   hwstat->rx_length_field_frame_errors);
	nstat->rx_over_errors = hwstat->rx_resource_errors;
	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
	nstat->tx_fifo_errors = hwstat->tx_underrun;

	return nstat;
}
|
|
|
|
|
2012-11-07 16:14:54 +08:00
|
|
|
/*
 * macb_get_stats - .ndo_get_stats: return device statistics.
 *
 * GEM variants delegate to gem_get_stats(); classic MACB reads its own
 * (differently named) counter set via macb_update_stats() and maps it
 * onto struct net_device_stats.  Returns a pointer to bp->stats.
 */
struct net_device_stats *macb_get_stats(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->stats;
	struct macb_stats *hwstat = &bp->hw_stats.macb;

	if (macb_is_gem(bp))
		return gem_get_stats(bp);

	/* read stats from hardware */
	macb_update_stats(bp);

	/* Convert HW stats into netdevice stats */
	nstat->rx_errors = (hwstat->rx_fcs_errors +
			    hwstat->rx_align_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_pkts +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersize_pkts +
			    hwstat->sqe_test_errors +
			    hwstat->rx_length_mismatch);
	nstat->tx_errors = (hwstat->tx_late_cols +
			    hwstat->tx_excessive_cols +
			    hwstat->tx_underruns +
			    hwstat->tx_carrier_errors);
	nstat->collisions = (hwstat->tx_single_cols +
			     hwstat->tx_multiple_cols +
			     hwstat->tx_excessive_cols);
	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersize_pkts +
				   hwstat->rx_length_mismatch);
	nstat->rx_over_errors = hwstat->rx_resource_errors +
				hwstat->rx_overruns;
	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
	nstat->rx_frame_errors = hwstat->rx_align_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	/* XXX: What does "missed" mean? */
	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
	nstat->tx_fifo_errors = hwstat->tx_underruns;
	/* Don't know about heartbeat or window errors... */

	return nstat;
}
EXPORT_SYMBOL_GPL(macb_get_stats);
|
2006-11-09 21:51:17 +08:00
|
|
|
|
|
|
|
static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
|
|
|
|
{
|
|
|
|
struct macb *bp = netdev_priv(dev);
|
2007-07-13 01:07:24 +08:00
|
|
|
struct phy_device *phydev = bp->phy_dev;
|
|
|
|
|
|
|
|
if (!phydev)
|
|
|
|
return -ENODEV;
|
2006-11-09 21:51:17 +08:00
|
|
|
|
2007-07-13 01:07:24 +08:00
|
|
|
return phy_ethtool_gset(phydev, cmd);
|
2006-11-09 21:51:17 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
|
|
|
|
{
|
|
|
|
struct macb *bp = netdev_priv(dev);
|
2007-07-13 01:07:24 +08:00
|
|
|
struct phy_device *phydev = bp->phy_dev;
|
2006-11-09 21:51:17 +08:00
|
|
|
|
2007-07-13 01:07:24 +08:00
|
|
|
if (!phydev)
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
return phy_ethtool_sset(phydev, cmd);
|
2006-11-09 21:51:17 +08:00
|
|
|
}
|
|
|
|
|
2012-10-31 14:04:56 +08:00
|
|
|
static int macb_get_regs_len(struct net_device *netdev)
|
|
|
|
{
|
|
|
|
return MACB_GREGS_NBR * sizeof(u32);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * macb_get_regs - ethtool .get_regs: dump key MAC registers and TX state.
 * @p: buffer of macb_get_regs_len() bytes, filled as an array of u32
 *
 * The slot order below is part of the dump format consumed by userspace
 * (ethtool -d); do not reorder.  regs->version encodes the hardware
 * revision (low bits of MID) combined with MACB_GREGS_VERSION.
 */
static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
	struct macb *bp = netdev_priv(dev);
	unsigned int tail, head;
	u32 *regs_buff = p;

	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
			| MACB_GREGS_VERSION;

	/* snapshot of the software TX ring indices, wrapped to ring size */
	tail = macb_tx_ring_wrap(bp->tx_tail);
	head = macb_tx_ring_wrap(bp->tx_head);

	regs_buff[0] = macb_readl(bp, NCR);
	regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
	regs_buff[2] = macb_readl(bp, NSR);
	regs_buff[3] = macb_readl(bp, TSR);
	regs_buff[4] = macb_readl(bp, RBQP);
	regs_buff[5] = macb_readl(bp, TBQP);
	regs_buff[6] = macb_readl(bp, RSR);
	regs_buff[7] = macb_readl(bp, IMR);

	regs_buff[8] = tail;
	regs_buff[9] = head;
	regs_buff[10] = macb_tx_dma(bp, tail);
	regs_buff[11] = macb_tx_dma(bp, head);

	/* GEM-only registers; left untouched (zero-filled by ethtool) on MACB */
	if (macb_is_gem(bp)) {
		regs_buff[12] = gem_readl(bp, USRIO);
		regs_buff[13] = gem_readl(bp, DMACFG);
	}
}
|
|
|
|
|
2012-10-18 19:01:12 +08:00
|
|
|
/* ethtool operations shared by MACB and GEM; exported so platform glue
 * drivers reusing this core (e.g. at91_ether) can point at it too. */
const struct ethtool_ops macb_ethtool_ops = {
	.get_settings		= macb_get_settings,
	.set_settings		= macb_set_settings,
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= ethtool_op_get_ts_info,
};
EXPORT_SYMBOL_GPL(macb_ethtool_ops);
|
2006-11-09 21:51:17 +08:00
|
|
|
|
2012-10-18 19:01:12 +08:00
|
|
|
int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
|
2006-11-09 21:51:17 +08:00
|
|
|
{
|
|
|
|
struct macb *bp = netdev_priv(dev);
|
2007-07-13 01:07:24 +08:00
|
|
|
struct phy_device *phydev = bp->phy_dev;
|
2006-11-09 21:51:17 +08:00
|
|
|
|
|
|
|
if (!netif_running(dev))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2007-07-13 01:07:24 +08:00
|
|
|
if (!phydev)
|
|
|
|
return -ENODEV;
|
2006-11-09 21:51:17 +08:00
|
|
|
|
2010-07-17 16:48:55 +08:00
|
|
|
return phy_mii_ioctl(phydev, rq, cmd);
|
2006-11-09 21:51:17 +08:00
|
|
|
}
|
2012-10-18 19:01:12 +08:00
|
|
|
EXPORT_SYMBOL_GPL(macb_ioctl);
|
2006-11-09 21:51:17 +08:00
|
|
|
|
2009-04-11 15:42:26 +08:00
|
|
|
/* Netdevice callbacks; generic eth_* helpers cover address/MTU handling. */
static const struct net_device_ops macb_netdev_ops = {
	.ndo_open		= macb_open,
	.ndo_stop		= macb_close,
	.ndo_start_xmit		= macb_start_xmit,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_get_stats		= macb_get_stats,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	/* polled RX path for netconsole/kgdboe when IRQs are unusable */
	.ndo_poll_controller	= macb_poll_controller,
#endif
};
|
|
|
|
|
2011-11-18 22:29:25 +08:00
|
|
|
#if defined(CONFIG_OF)
|
|
|
|
static const struct of_device_id macb_dt_ids[] = {
|
|
|
|
{ .compatible = "cdns,at32ap7000-macb" },
|
|
|
|
{ .compatible = "cdns,at91sam9260-macb" },
|
|
|
|
{ .compatible = "cdns,macb" },
|
|
|
|
{ .compatible = "cdns,pc302-gem" },
|
|
|
|
{ .compatible = "cdns,gem" },
|
|
|
|
{ /* sentinel */ }
|
|
|
|
};
|
|
|
|
MODULE_DEVICE_TABLE(of, macb_dt_ids);
|
|
|
|
#endif
|
|
|
|
|
2008-01-31 20:10:22 +08:00
|
|
|
/*
 * macb_probe - platform probe: allocate, wire up and register one device.
 *
 * Acquisition order (unwound in reverse via the goto chain at the bottom):
 * MMIO resource -> pinctrl -> netdev -> pclk -> hclk -> ioremap -> IRQ ->
 * register_netdev -> MII bus.  On any failure the corresponding err_out_*
 * label releases everything acquired so far and returns a negative errno.
 */
static int __init macb_probe(struct platform_device *pdev)
{
	struct macb_platform_data *pdata;
	struct resource *regs;
	struct net_device *dev;
	struct macb *bp;
	struct phy_device *phydev;
	u32 config;
	int err = -ENXIO;
	struct pinctrl *pinctrl;
	const char *mac;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs) {
		dev_err(&pdev->dev, "no mmio resource defined\n");
		goto err_out;
	}

	/* pinctrl is optional: only -EPROBE_DEFER is fatal here */
	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
	if (IS_ERR(pinctrl)) {
		err = PTR_ERR(pinctrl);
		if (err == -EPROBE_DEFER)
			goto err_out;

		dev_warn(&pdev->dev, "No pinctrl provided\n");
	}

	err = -ENOMEM;
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev)
		goto err_out;

	SET_NETDEV_DEV(dev, &pdev->dev);

	/* TODO: Actually, we have some interesting features... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;

	spin_lock_init(&bp->lock);
	/* deferred TX error recovery runs from this work item */
	INIT_WORK(&bp->tx_error_task, macb_tx_error_task);

	bp->pclk = clk_get(&pdev->dev, "pclk");
	if (IS_ERR(bp->pclk)) {
		dev_err(&pdev->dev, "failed to get macb_clk\n");
		goto err_out_free_dev;
	}
	clk_prepare_enable(bp->pclk);

	bp->hclk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(bp->hclk)) {
		dev_err(&pdev->dev, "failed to get hclk\n");
		goto err_out_put_pclk;
	}
	clk_prepare_enable(bp->hclk);

	bp->regs = ioremap(regs->start, resource_size(regs));
	if (!bp->regs) {
		dev_err(&pdev->dev, "failed to map registers, aborting.\n");
		err = -ENOMEM;
		goto err_out_disable_clocks;
	}

	dev->irq = platform_get_irq(pdev, 0);
	err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev);
	if (err) {
		dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
			dev->irq, err);
		goto err_out_iounmap;
	}

	dev->netdev_ops = &macb_netdev_ops;
	netif_napi_add(dev, &bp->napi, macb_poll, 64);
	dev->ethtool_ops = &macb_ethtool_ops;

	dev->base_addr = regs->start;

	/* setup appropriated routines according to adapter type (MACB/GEM) */
	if (macb_is_gem(bp)) {
		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = gem_init_rings;
		bp->macbgem_ops.mog_rx = gem_rx;
	} else {
		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = macb_init_rings;
		bp->macbgem_ops.mog_rx = macb_rx;
	}

	/* Set MII management clock divider */
	config = macb_mdc_clk_div(bp);
	config |= macb_dbw(bp);
	macb_writel(bp, NCFGR, config);

	/* MAC address: device tree first, then the hardware registers */
	mac = of_get_mac_address(pdev->dev.of_node);
	if (mac)
		memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
	else
		macb_get_hwaddr(bp);

	/* PHY mode: device tree, falling back to platform data / MII */
	err = of_get_phy_mode(pdev->dev.of_node);
	if (err < 0) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata && pdata->is_rmii)
			bp->phy_interface = PHY_INTERFACE_MODE_RMII;
		else
			bp->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		bp->phy_interface = err;
	}

	/* program the USRIO pin-mux according to the selected PHY mode;
	 * AT91 uses inverted RMII encoding and needs the clock enable bit */
	if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
		macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII));
	else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
#if defined(CONFIG_ARCH_AT91)
		macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
					       MACB_BIT(CLKEN)));
#else
		macb_or_gem_writel(bp, USRIO, 0);
#endif
	else
#if defined(CONFIG_ARCH_AT91)
		macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN));
#else
		macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
#endif

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_free_irq;
	}

	err = macb_mii_init(bp);
	if (err)
		goto err_out_unregister_netdev;

	platform_set_drvdata(pdev, dev);

	netif_carrier_off(dev);

	netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr,
		    dev->irq, dev->dev_addr);

	/* macb_mii_init() succeeded above, so phy_dev is attached here */
	phydev = bp->phy_dev;
	netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
		    phydev->drv->name, dev_name(&phydev->dev), phydev->irq);

	return 0;

err_out_unregister_netdev:
	unregister_netdev(dev);
err_out_free_irq:
	free_irq(dev->irq, dev);
err_out_iounmap:
	iounmap(bp->regs);
err_out_disable_clocks:
	clk_disable_unprepare(bp->hclk);
	clk_put(bp->hclk);
	clk_disable_unprepare(bp->pclk);
err_out_put_pclk:
	clk_put(bp->pclk);
err_out_free_dev:
	free_netdev(dev);
err_out:
	return err;
}
|
|
|
|
|
2008-01-31 20:10:22 +08:00
|
|
|
/*
 * macb_remove - platform remove: tear down everything macb_probe() set up.
 *
 * Releases resources in reverse acquisition order: PHY, MII bus, netdev
 * registration, IRQ, MMIO mapping, clocks, and finally the netdev itself.
 * Tolerates a NULL drvdata (probe never completed).  Always returns 0.
 */
static int __exit macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (bp->phy_dev)
			phy_disconnect(bp->phy_dev);
		mdiobus_unregister(bp->mii_bus);
		/* irq table was kmalloc'ed in macb_mii_init — free it here */
		kfree(bp->mii_bus->irq);
		mdiobus_free(bp->mii_bus);
		unregister_netdev(dev);
		free_irq(dev->irq, dev);
		iounmap(bp->regs);
		clk_disable_unprepare(bp->hclk);
		clk_put(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_put(bp->pclk);
		free_netdev(dev);
	}

	return 0;
}
|
|
|
|
|
2008-03-04 20:39:29 +08:00
|
|
|
#ifdef CONFIG_PM
/*
 * macb_suspend - system-sleep suspend: detach the netdev and gate clocks.
 *
 * Drops carrier and detaches the device from the network stack so no new
 * traffic is attempted, then disables both the bus (pclk) and peripheral
 * (hclk) clocks to save power.  Always returns 0.
 */
static int macb_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	netif_carrier_off(netdev);
	netif_device_detach(netdev);

	clk_disable_unprepare(bp->hclk);
	clk_disable_unprepare(bp->pclk);

	return 0;
}
|
|
|
|
|
2013-12-11 08:07:19 +08:00
|
|
|
/*
 * macb_resume - system-sleep resume: re-enable clocks, reattach netdev.
 *
 * Mirror of macb_suspend(); clocks come back in the reverse order they
 * were disabled (pclk before hclk).  Always returns 0.
 */
static int macb_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	clk_prepare_enable(bp->pclk);
	clk_prepare_enable(bp->hclk);

	netif_device_attach(netdev);

	return 0;
}
#endif
|
|
|
|
|
2013-12-11 08:07:19 +08:00
|
|
|
/* SIMPLE_DEV_PM_OPS compiles to empty ops when CONFIG_PM is off, so the
 * unconditional .pm reference below stays valid either way. */
static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);

static struct platform_driver macb_driver = {
	/* probe is __init and registered via module_platform_driver_probe() */
	.remove		= __exit_p(macb_remove),
	.driver		= {
		.name		= "macb",
		.owner		= THIS_MODULE,
		.of_match_table	= of_match_ptr(macb_dt_ids),
		.pm		= &macb_pm_ops,
	},
};

module_platform_driver_probe(macb_driver, macb_probe);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");
|