Merge branch 'for-david' of git://git.kernel.org/pub/scm/linux/kernel/git/chris/linux-2.6
commit e74b3f7d56

@@ -1844,7 +1844,7 @@ P: Haavard Skinnemoen
M: hskinnemoen@atmel.com
S: Supported

GENERIC HDLC DRIVER, N2, C101, PCI200SYN and WANXL DRIVERS
GENERIC HDLC (WAN) DRIVERS
P: Krzysztof Halasa
M: khc@pm.waw.pl
W: http://www.kernel.org/pub/linux/utils/net/hdlc/

@@ -2243,6 +2243,11 @@ M: dan.j.williams@intel.com
L: linux-kernel@vger.kernel.org
S: Supported

INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT
P: Krzysztof Halasa
M: khc@pm.waw.pl
S: Maintained

INTEL IXP4XX RANDOM NUMBER GENERATOR SUPPORT
P: Deepak Saxena
M: dsaxena@plexity.net

@@ -12,6 +12,8 @@
#include <linux/io.h>
#include <linux/kernel.h>

#define DEBUG_QMGR 0

#define HALF_QUEUES 32
#define QUEUES 64 /* only 32 lower queues currently supported */
#define MAX_QUEUE_LENGTH 4 /* in dwords */

@@ -61,22 +63,51 @@ void qmgr_enable_irq(unsigned int queue);
void qmgr_disable_irq(unsigned int queue);

/* request_ and release_queue() must be called from non-IRQ context */

#if DEBUG_QMGR
extern char qmgr_queue_descs[QUEUES][32];

int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
unsigned int nearly_empty_watermark,
unsigned int nearly_full_watermark);
unsigned int nearly_full_watermark,
const char *desc_format, const char* name);
#else
int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
unsigned int nearly_empty_watermark,
unsigned int nearly_full_watermark);
#define qmgr_request_queue(queue, len, nearly_empty_watermark, \
nearly_full_watermark, desc_format, name) \
__qmgr_request_queue(queue, len, nearly_empty_watermark, \
nearly_full_watermark)
#endif

void qmgr_release_queue(unsigned int queue);


static inline void qmgr_put_entry(unsigned int queue, u32 val)
{
extern struct qmgr_regs __iomem *qmgr_regs;
#if DEBUG_QMGR
BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */

printk(KERN_DEBUG "Queue %s(%i) put %X\n",
qmgr_queue_descs[queue], queue, val);
#endif
__raw_writel(val, &qmgr_regs->acc[queue][0]);
}

static inline u32 qmgr_get_entry(unsigned int queue)
{
u32 val;
extern struct qmgr_regs __iomem *qmgr_regs;
return __raw_readl(&qmgr_regs->acc[queue][0]);
val = __raw_readl(&qmgr_regs->acc[queue][0]);
#if DEBUG_QMGR
BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */

printk(KERN_DEBUG "Queue %s(%i) get %X\n",
qmgr_queue_descs[queue], queue, val);
#endif
return val;
}

static inline int qmgr_get_stat1(unsigned int queue)

@@ -14,8 +14,6 @@
#include <linux/module.h>
#include <mach/qmgr.h>

#define DEBUG 0

struct qmgr_regs __iomem *qmgr_regs;
static struct resource *mem_res;
static spinlock_t qmgr_lock;

@@ -23,6 +21,10 @@ static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
static void (*irq_handlers[HALF_QUEUES])(void *pdev);
static void *irq_pdevs[HALF_QUEUES];

#if DEBUG_QMGR
char qmgr_queue_descs[QUEUES][32];
#endif

void qmgr_set_irq(unsigned int queue, int src,
void (*handler)(void *pdev), void *pdev)
{

@@ -70,6 +72,7 @@ void qmgr_disable_irq(unsigned int queue)
spin_lock_irqsave(&qmgr_lock, flags);
__raw_writel(__raw_readl(&qmgr_regs->irqen[0]) & ~(1 << queue),
&qmgr_regs->irqen[0]);
__raw_writel(1 << queue, &qmgr_regs->irqstat[0]); /* clear */
spin_unlock_irqrestore(&qmgr_lock, flags);
}

@@ -81,9 +84,16 @@ static inline void shift_mask(u32 *mask)
mask[0] <<= 1;
}

#if DEBUG_QMGR
int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
unsigned int nearly_empty_watermark,
unsigned int nearly_full_watermark)
unsigned int nearly_full_watermark,
const char *desc_format, const char* name)
#else
int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
unsigned int nearly_empty_watermark,
unsigned int nearly_full_watermark)
#endif
{
u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
int err;

@@ -151,12 +161,13 @@ int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
used_sram_bitmap[2] |= mask[2];
used_sram_bitmap[3] |= mask[3];
__raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
spin_unlock_irq(&qmgr_lock);

#if DEBUG
printk(KERN_DEBUG "qmgr: requested queue %i, addr = 0x%02X\n",
queue, addr);
#if DEBUG_QMGR
snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]),
desc_format, name);
printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n",
qmgr_queue_descs[queue], queue, addr);
#endif
spin_unlock_irq(&qmgr_lock);
return 0;

err:

@@ -189,6 +200,11 @@ void qmgr_release_queue(unsigned int queue)
while (addr--)
shift_mask(mask);

#if DEBUG_QMGR
printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n",
qmgr_queue_descs[queue], queue);
qmgr_queue_descs[queue][0] = '\x0';
#endif
__raw_writel(0, &qmgr_regs->sram[queue]);

used_sram_bitmap[0] &= ~mask[0];

@@ -199,9 +215,10 @@ void qmgr_release_queue(unsigned int queue)
spin_unlock_irq(&qmgr_lock);

module_put(THIS_MODULE);
#if DEBUG
printk(KERN_DEBUG "qmgr: released queue %i\n", queue);
#endif

while ((addr = qmgr_get_entry(queue)))
printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
queue, addr);
}

static int qmgr_init(void)

@@ -272,5 +289,10 @@ EXPORT_SYMBOL(qmgr_regs);
EXPORT_SYMBOL(qmgr_set_irq);
EXPORT_SYMBOL(qmgr_enable_irq);
EXPORT_SYMBOL(qmgr_disable_irq);
#if DEBUG_QMGR
EXPORT_SYMBOL(qmgr_queue_descs);
EXPORT_SYMBOL(qmgr_request_queue);
#else
EXPORT_SYMBOL(__qmgr_request_queue);
#endif
EXPORT_SYMBOL(qmgr_release_queue);

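The two sections above change the IXP4xx queue manager API: with DEBUG_QMGR enabled, qmgr_request_queue() now takes a printf-style description that is stored in qmgr_queue_descs[] and printed by the put/get helpers; with it disabled, a macro in qmgr.h drops the extra arguments and calls __qmgr_request_queue(). A minimal caller sketch follows; the queue number, length and "eth0" label are illustrative values, not taken from this commit.

#include <mach/qmgr.h>

/* Hypothetical caller: queue 31, 64 dwords and the "eth0" label are
 * made-up example values; the real callers appear later in this diff
 * (request_queues() in the IXP4xx Ethernet driver). */
static int example_queue_setup(void)
{
        int err;

        /* With DEBUG_QMGR=1 the trailing arguments become the stored
         * description; with DEBUG_QMGR=0 the qmgr_request_queue macro
         * silently discards them. */
        err = qmgr_request_queue(31, 64, 0, 0, "%s:RX", "eth0");
        if (err)
                return err;

        qmgr_put_entry(31, 0xDEADBEEF); /* traced as "eth0:RX(31)" when debugging */
        (void)qmgr_get_entry(31);

        qmgr_release_queue(31);
        return 0;
}
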
@@ -59,7 +59,7 @@ config EP93XX_ETH
config IXP4XX_ETH
tristate "Intel IXP4xx Ethernet support"
depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
select MII
select PHYLIB
help
Say Y here if you want to use built-in Ethernet ports
on IXP4xx processor.

@@ -30,12 +30,11 @@
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <mach/npe.h>
#include <mach/qmgr.h>

#define DEBUG_QUEUES 0
#define DEBUG_DESC 0
#define DEBUG_RX 0
#define DEBUG_TX 0

@@ -59,7 +58,6 @@
#define NAPI_WEIGHT 16
#define MDIO_INTERVAL (3 * HZ)
#define MAX_MDIO_RETRIES 100 /* microseconds, typically 30 cycles */
#define MAX_MII_RESET_RETRIES 100 /* mdio_read() cycles, typically 4 */
#define MAX_CLOSE_WAIT 1000 /* microseconds, typically 2-3 cycles */

#define NPE_ID(port_id) ((port_id) >> 4)

@@ -164,15 +162,14 @@ struct port {
struct npe *npe;
struct net_device *netdev;
struct napi_struct napi;
struct net_device_stats stat;
struct mii_if_info mii;
struct delayed_work mdio_thread;
struct phy_device *phydev;
struct eth_plat_info *plat;
buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
struct desc *desc_tab; /* coherent */
u32 desc_tab_phys;
int id; /* logical port ID */
u16 mii_bmcr;
int speed, duplex;
u8 firmware[4];
};

/* NPE message structure */

@@ -243,19 +240,20 @@ static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)

static spinlock_t mdio_lock;
static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
struct mii_bus *mdio_bus;
static int ports_open;
static struct port *npe_port_tab[MAX_NPES];
static struct dma_pool *dma_pool;


static u16 mdio_cmd(struct net_device *dev, int phy_id, int location,
int write, u16 cmd)
static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
int write, u16 cmd)
{
int cycles = 0;

if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
printk(KERN_ERR "%s: MII not ready to transmit\n", dev->name);
return 0;
printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name);
return -1;
}

if (write) {

@@ -274,107 +272,119 @@ static u16 mdio_cmd(struct net_device *dev, int phy_id, int location,
}

if (cycles == MAX_MDIO_RETRIES) {
printk(KERN_ERR "%s: MII write failed\n", dev->name);
return 0;
printk(KERN_ERR "%s #%i: MII write failed\n", bus->name,
phy_id);
return -1;
}

#if DEBUG_MDIO
printk(KERN_DEBUG "%s: mdio_cmd() took %i cycles\n", dev->name,
cycles);
printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name,
phy_id, write ? "write" : "read", cycles);
#endif

if (write)
return 0;

if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
printk(KERN_ERR "%s: MII read failed\n", dev->name);
return 0;
#if DEBUG_MDIO
printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name,
phy_id);
#endif
return 0xFFFF; /* don't return error */
}

return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
(__raw_readl(&mdio_regs->mdio_status[1]) << 8);
((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
{
unsigned long flags;
u16 val;
int ret;

spin_lock_irqsave(&mdio_lock, flags);
val = mdio_cmd(dev, phy_id, location, 0, 0);
ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0);
spin_unlock_irqrestore(&mdio_lock, flags);
return val;
#if DEBUG_MDIO
printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name,
phy_id, location, ret);
#endif
return ret;
}

static void mdio_write(struct net_device *dev, int phy_id, int location,
int val)
static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
u16 val)
{
unsigned long flags;
int ret;

spin_lock_irqsave(&mdio_lock, flags);
mdio_cmd(dev, phy_id, location, 1, val);
ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val);
spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
printk(KERN_DEBUG "%s #%i: MII read [%i] <- 0x%X, err = %i\n",
bus->name, phy_id, location, val, ret);
#endif
return ret;
}

static void phy_reset(struct net_device *dev, int phy_id)
static int ixp4xx_mdio_register(void)
{
int err;

if (!(mdio_bus = mdiobus_alloc()))
return -ENOMEM;

/* All MII PHY accesses use NPE-B Ethernet registers */
spin_lock_init(&mdio_lock);
mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);

mdio_bus->name = "IXP4xx MII Bus";
mdio_bus->read = &ixp4xx_mdio_read;
mdio_bus->write = &ixp4xx_mdio_write;
strcpy(mdio_bus->id, "0");

if ((err = mdiobus_register(mdio_bus)))
mdiobus_free(mdio_bus);
return err;
}

static void ixp4xx_mdio_remove(void)
{
mdiobus_unregister(mdio_bus);
mdiobus_free(mdio_bus);
}


static void ixp4xx_adjust_link(struct net_device *dev)
{
struct port *port = netdev_priv(dev);
int cycles = 0;
struct phy_device *phydev = port->phydev;

mdio_write(dev, phy_id, MII_BMCR, port->mii_bmcr | BMCR_RESET);

while (cycles < MAX_MII_RESET_RETRIES) {
if (!(mdio_read(dev, phy_id, MII_BMCR) & BMCR_RESET)) {
#if DEBUG_MDIO
printk(KERN_DEBUG "%s: phy_reset() took %i cycles\n",
dev->name, cycles);
#endif
return;
if (!phydev->link) {
if (port->speed) {
port->speed = 0;
printk(KERN_INFO "%s: link down\n", dev->name);
}
udelay(1);
cycles++;
return;
}

printk(KERN_ERR "%s: MII reset failed\n", dev->name);
}
if (port->speed == phydev->speed && port->duplex == phydev->duplex)
return;

static void eth_set_duplex(struct port *port)
{
if (port->mii.full_duplex)
port->speed = phydev->speed;
port->duplex = phydev->duplex;

if (port->duplex)
__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
&port->regs->tx_control[0]);
else
__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
&port->regs->tx_control[0]);
}


static void phy_check_media(struct port *port, int init)
{
if (mii_check_media(&port->mii, 1, init))
eth_set_duplex(port);
if (port->mii.force_media) { /* mii_check_media() doesn't work */
struct net_device *dev = port->netdev;
int cur_link = mii_link_ok(&port->mii);
int prev_link = netif_carrier_ok(dev);

if (!prev_link && cur_link) {
printk(KERN_INFO "%s: link up\n", dev->name);
netif_carrier_on(dev);
} else if (prev_link && !cur_link) {
printk(KERN_INFO "%s: link down\n", dev->name);
netif_carrier_off(dev);
}
}
}


static void mdio_thread(struct work_struct *work)
{
struct port *port = container_of(work, struct port, mdio_thread.work);

phy_check_media(port, 0);
schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);
printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
dev->name, port->speed, port->duplex ? "full" : "half");
}

@@ -412,47 +422,13 @@ static inline void debug_desc(u32 phys, struct desc *desc)
#endif
}

static inline void debug_queue(unsigned int queue, int is_get, u32 phys)
{
#if DEBUG_QUEUES
static struct {
int queue;
char *name;
} names[] = {
{ TX_QUEUE(0x10), "TX#0 " },
{ TX_QUEUE(0x20), "TX#1 " },
{ TX_QUEUE(0x00), "TX#2 " },
{ RXFREE_QUEUE(0x10), "RX-free#0 " },
{ RXFREE_QUEUE(0x20), "RX-free#1 " },
{ RXFREE_QUEUE(0x00), "RX-free#2 " },
{ TXDONE_QUEUE, "TX-done " },
};
int i;

for (i = 0; i < ARRAY_SIZE(names); i++)
if (names[i].queue == queue)
break;

printk(KERN_DEBUG "Queue %i %s%s %X\n", queue,
i < ARRAY_SIZE(names) ? names[i].name : "",
is_get ? "->" : "<-", phys);
#endif
}

static inline u32 queue_get_entry(unsigned int queue)
{
u32 phys = qmgr_get_entry(queue);
debug_queue(queue, 1, phys);
return phys;
}

static inline int queue_get_desc(unsigned int queue, struct port *port,
int is_tx)
{
u32 phys, tab_phys, n_desc;
struct desc *tab;

if (!(phys = queue_get_entry(queue)))
if (!(phys = qmgr_get_entry(queue)))
return -1;

phys &= ~0x1F; /* mask out non-address bits */

@@ -468,7 +444,6 @@ static inline int queue_get_desc(unsigned int queue, struct port *port,
static inline void queue_put_desc(unsigned int queue, u32 phys,
struct desc *desc)
{
debug_queue(queue, 0, phys);
debug_desc(phys, desc);
BUG_ON(phys & 0x1F);
qmgr_put_entry(queue, phys);

@@ -562,7 +537,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
#endif

if (!skb) {
port->stat.rx_dropped++;
dev->stats.rx_dropped++;
/* put the desc back on RX-ready queue */
desc->buf_len = MAX_MRU;
desc->pkt_len = 0;

@@ -588,8 +563,8 @@ static int eth_poll(struct napi_struct *napi, int budget)
debug_pkt(dev, "eth_poll", skb->data, skb->len);

skb->protocol = eth_type_trans(skb, dev);
port->stat.rx_packets++;
port->stat.rx_bytes += skb->len;
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
netif_receive_skb(skb);

/* put the new buffer on RX-free queue */

@@ -617,7 +592,7 @@ static void eth_txdone_irq(void *unused)
#if DEBUG_TX
printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
#endif
while ((phys = queue_get_entry(TXDONE_QUEUE)) != 0) {
while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) {
u32 npe_id, n_desc;
struct port *port;
struct desc *desc;

@@ -634,8 +609,8 @@ static void eth_txdone_irq(void *unused)
debug_desc(phys, desc);

if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
port->stat.tx_packets++;
port->stat.tx_bytes += desc->pkt_len;
port->netdev->stats.tx_packets++;
port->netdev->stats.tx_bytes += desc->pkt_len;

dma_unmap_tx(port, desc);
#if DEBUG_TX

@@ -673,7 +648,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)

if (unlikely(skb->len > MAX_MRU)) {
dev_kfree_skb(skb);
port->stat.tx_errors++;
dev->stats.tx_errors++;
return NETDEV_TX_OK;
}

@@ -689,7 +664,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
bytes = ALIGN(offset + len, 4);
if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
dev_kfree_skb(skb);
port->stat.tx_dropped++;
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);

@@ -703,7 +678,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
#else
kfree(mem);
#endif
port->stat.tx_dropped++;
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}

@@ -746,12 +721,6 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
}


static struct net_device_stats *eth_stats(struct net_device *dev)
{
struct port *port = netdev_priv(dev);
return &port->stat;
}

static void eth_set_mcast_list(struct net_device *dev)
{
struct port *port = netdev_priv(dev);

@@ -785,41 +754,80 @@ static void eth_set_mcast_list(struct net_device *dev)
static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
struct port *port = netdev_priv(dev);
unsigned int duplex_chg;
int err;

if (!netif_running(dev))
return -EINVAL;
err = generic_mii_ioctl(&port->mii, if_mii(req), cmd, &duplex_chg);
if (duplex_chg)
eth_set_duplex(port);
return err;
return phy_mii_ioctl(port->phydev, if_mii(req), cmd);
}

/* ethtool support */

static void ixp4xx_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct port *port = netdev_priv(dev);
strcpy(info->driver, DRV_NAME);
snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
port->firmware[0], port->firmware[1],
port->firmware[2], port->firmware[3]);
strcpy(info->bus_info, "internal");
}

static int ixp4xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct port *port = netdev_priv(dev);
return phy_ethtool_gset(port->phydev, cmd);
}

static int ixp4xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct port *port = netdev_priv(dev);
return phy_ethtool_sset(port->phydev, cmd);
}

static int ixp4xx_nway_reset(struct net_device *dev)
{
struct port *port = netdev_priv(dev);
return phy_start_aneg(port->phydev);
}

static struct ethtool_ops ixp4xx_ethtool_ops = {
.get_drvinfo = ixp4xx_get_drvinfo,
.get_settings = ixp4xx_get_settings,
.set_settings = ixp4xx_set_settings,
.nway_reset = ixp4xx_nway_reset,
.get_link = ethtool_op_get_link,
};


static int request_queues(struct port *port)
{
int err;

err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0);
err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
"%s:RX-free", port->netdev->name);
if (err)
return err;

err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0);
err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
"%s:RX", port->netdev->name);
if (err)
goto rel_rxfree;

err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0);
err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
"%s:TX", port->netdev->name);
if (err)
goto rel_rx;

err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0);
err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
"%s:TX-ready", port->netdev->name);
if (err)
goto rel_tx;

/* TX-done queue handles skbs sent out by the NPEs */
if (!ports_open) {
err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0);
err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
"%s:TX-done", DRV_NAME);
if (err)
goto rel_txready;
}

@@ -943,10 +951,12 @@ static int eth_open(struct net_device *dev)
npe_name(npe));
return -EIO;
}
port->firmware[0] = msg.byte4;
port->firmware[1] = msg.byte5;
port->firmware[2] = msg.byte6;
port->firmware[3] = msg.byte7;
}

mdio_write(dev, port->plat->phy, MII_BMCR, port->mii_bmcr);

memset(&msg, 0, sizeof(msg));
msg.cmd = NPE_VLAN_SETRXQOSENTRY;
msg.eth_id = port->id;

@@ -984,6 +994,9 @@ static int eth_open(struct net_device *dev)
return err;
}

port->speed = 0; /* force "link up" message */
phy_start(port->phydev);

for (i = 0; i < ETH_ALEN; i++)
__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
__raw_writel(0x08, &port->regs->random_seed);

@@ -1011,10 +1024,8 @@ static int eth_open(struct net_device *dev)
__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);

napi_enable(&port->napi);
phy_check_media(port, 1);
eth_set_mcast_list(dev);
netif_start_queue(dev);
schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);

qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
eth_rx_irq, dev);

@@ -1105,25 +1116,31 @@ static int eth_close(struct net_device *dev)
printk(KERN_CRIT "%s: unable to disable loopback\n",
dev->name);

port->mii_bmcr = mdio_read(dev, port->plat->phy, MII_BMCR) &
~(BMCR_RESET | BMCR_PDOWN); /* may have been altered */
mdio_write(dev, port->plat->phy, MII_BMCR,
port->mii_bmcr | BMCR_PDOWN);
phy_stop(port->phydev);

if (!ports_open)
qmgr_disable_irq(TXDONE_QUEUE);
cancel_rearming_delayed_work(&port->mdio_thread);
destroy_queues(port);
release_queues(port);
return 0;
}

static const struct net_device_ops ixp4xx_netdev_ops = {
.ndo_open = eth_open,
.ndo_stop = eth_close,
.ndo_start_xmit = eth_xmit,
.ndo_set_multicast_list = eth_set_mcast_list,
.ndo_do_ioctl = eth_ioctl,

};

static int __devinit eth_init_one(struct platform_device *pdev)
{
struct port *port;
struct net_device *dev;
struct eth_plat_info *plat = pdev->dev.platform_data;
u32 regs_phys;
char phy_id[BUS_ID_SIZE];
int err;

if (!(dev = alloc_etherdev(sizeof(struct port))))

@@ -1152,12 +1169,8 @@ static int __devinit eth_init_one(struct platform_device *pdev)
goto err_free;
}

dev->open = eth_open;
dev->hard_start_xmit = eth_xmit;
dev->stop = eth_close;
dev->get_stats = eth_stats;
dev->do_ioctl = eth_ioctl;
dev->set_multicast_list = eth_set_mcast_list;
dev->netdev_ops = &ixp4xx_netdev_ops;
dev->ethtool_ops = &ixp4xx_ethtool_ops;
dev->tx_queue_len = 100;

netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);

@@ -1190,22 +1203,19 @@ static int __devinit eth_init_one(struct platform_device *pdev)
__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
udelay(50);

port->mii.dev = dev;
port->mii.mdio_read = mdio_read;
port->mii.mdio_write = mdio_write;
port->mii.phy_id = plat->phy;
port->mii.phy_id_mask = 0x1F;
port->mii.reg_num_mask = 0x1F;
snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, "0", plat->phy);
port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(port->phydev)) {
printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
return PTR_ERR(port->phydev);
}

port->phydev->irq = PHY_POLL;

printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
npe_name(port->npe));

phy_reset(dev, plat->phy);
port->mii_bmcr = mdio_read(dev, plat->phy, MII_BMCR) &
~(BMCR_RESET | BMCR_PDOWN);
mdio_write(dev, plat->phy, MII_BMCR, port->mii_bmcr | BMCR_PDOWN);

INIT_DELAYED_WORK(&port->mdio_thread, mdio_thread);
return 0;

err_unreg:

@@ -1231,7 +1241,7 @@ static int __devexit eth_remove_one(struct platform_device *pdev)
return 0;
}

static struct platform_driver drv = {
static struct platform_driver ixp4xx_eth_driver = {
.driver.name = DRV_NAME,
.probe = eth_init_one,
.remove = eth_remove_one,

@@ -1239,20 +1249,19 @@ static struct platform_driver drv = {

static int __init eth_init_module(void)
{
int err;
if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
return -ENOSYS;

/* All MII PHY accesses use NPE-B Ethernet registers */
spin_lock_init(&mdio_lock);
mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);

return platform_driver_register(&drv);
if ((err = ixp4xx_mdio_register()))
return err;
return platform_driver_register(&ixp4xx_eth_driver);
}

static void __exit eth_cleanup_module(void)
{
platform_driver_unregister(&drv);
platform_driver_unregister(&ixp4xx_eth_driver);
ixp4xx_mdio_remove();
}

MODULE_AUTHOR("Krzysztof Halasa");

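The Ethernet driver hunks above drop the private mii_if_info/mdio_thread polling and convert the driver to phylib: an mii_bus backed by ixp4xx_mdio_read/write is registered at module init, each port attaches to its PHY with phy_connect(), and link changes arrive through ixp4xx_adjust_link(). A condensed sketch of that attach flow, using only calls that appear in this diff; the helper names and the "0" bus id are placeholders, and phy_connect()/PHY_ID_FMT are used with the signatures of this kernel generation, not the current API.

/* Sketch only - not the driver's actual code. */
static void example_adjust_link(struct net_device *dev)
{
        struct port *port = netdev_priv(dev);
        struct phy_device *phydev = port->phydev;

        if (!phydev->link) {
                port->speed = 0;                /* remember "link down" */
                return;
        }
        if (port->speed != phydev->speed || port->duplex != phydev->duplex) {
                port->speed = phydev->speed;    /* reprogram speed/duplex-dependent registers here */
                port->duplex = phydev->duplex;
        }
}

static int example_attach_phy(struct net_device *dev, int phy_addr)
{
        struct port *port = netdev_priv(dev);
        char phy_id[BUS_ID_SIZE];

        snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, "0", phy_addr);
        port->phydev = phy_connect(dev, phy_id, &example_adjust_link, 0,
                                   PHY_INTERFACE_MODE_MII);
        if (IS_ERR(port->phydev))
                return PTR_ERR(port->phydev);

        port->phydev->irq = PHY_POLL;   /* polled, no PHY interrupt line */
        return 0;
}

/* ndo_open then calls phy_start(port->phydev); ndo_stop calls phy_stop(port->phydev). */
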
@@ -335,6 +335,13 @@ config DSCC4_PCI_RST

Say Y if your card supports this feature.

config IXP4XX_HSS
tristate "Intel IXP4xx HSS (synchronous serial port) support"
depends on HDLC && ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
help
Say Y here if you want to use built-in HSS ports
on IXP4xx processor.

config DLCI
tristate "Frame Relay DLCI support"
---help---

@@ -41,6 +41,7 @@ obj-$(CONFIG_C101) += c101.o
obj-$(CONFIG_WANXL) += wanxl.o
obj-$(CONFIG_PCI200SYN) += pci200syn.o
obj-$(CONFIG_PC300TOO) += pc300too.o
obj-$(CONFIG_IXP4XX_HSS) += ixp4xx_hss.o

clean-files := wanxlfw.inc
$(obj)/wanxl.o: $(obj)/wanxlfw.inc

@@ -303,7 +303,7 @@ static int cp_table[EVENTS][STATES] = {
STA: RTR must supply id
SCJ: RUC must supply CP packet len and data */
static void ppp_cp_event(struct net_device *dev, u16 pid, u16 event, u8 code,
u8 id, unsigned int len, void *data)
u8 id, unsigned int len, const void *data)
{
int old_state, action;
struct ppp *ppp = get_ppp(dev);

@@ -374,11 +374,12 @@ static void ppp_cp_event(struct net_device *dev, u16 pid, u16 event, u8 code,


static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
unsigned int len, u8 *data)
unsigned int req_len, const u8 *data)
{
static u8 const valid_accm[6] = { LCP_OPTION_ACCM, 6, 0, 0, 0, 0 };
u8 *opt, *out;
unsigned int nak_len = 0, rej_len = 0;
const u8 *opt;
u8 *out;
unsigned int len = req_len, nak_len = 0, rej_len = 0;

if (!(out = kmalloc(len, GFP_ATOMIC))) {
dev->stats.rx_dropped++;

@@ -423,7 +424,7 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
else if (nak_len)
ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_NAK, id, nak_len, out);
else
ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, len, data);
ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data);

kfree(out);
}
[File diff suppressed because it is too large]