Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (37 commits)
  ucc_geth: use correct UCCE macros
  net_dma: acquire/release dma channels on ifup/ifdown
  cxgb3: Keep LRO off if disabled when interface is down
  sfc: SFT9001: Fix condition for LNPGA power-off
  dccp ccid-3: Fix RFC reference
  smsc911x: register irq with device name, not driver name
  smsc911x: fix smsc911x_reg_read compiler warning
  forcedeth: napi schedule lock fix
  net: fix section mismatch warnings in dccp/ccids/lib/tfrc.c
  forcedeth: remove mgmt unit for mcp79 chipset
  qlge: Remove dynamic alloc of rx ring control blocks.
  qlge: Fix schedule while atomic issue.
  qlge: Remove support for device ID 8000.
  qlge: Get rid of split addresses in hardware control blocks.
  qlge: Get rid of volatile usage for shadow register.
  forcedeth: version bump and copyright
  forcedeth: xmit lock fix
  netdev: missing validate_address hooks
  netdev: add missing set_mac_address hook
  netdev: gianfar: add MII ioctl handler
  ...
Linus Torvalds 2009-01-12 16:22:31 -08:00
commit 23ead72912
59 changed files with 338 additions and 368 deletions


@ -177,6 +177,7 @@ static const struct net_device_ops el2_netdev_ops = {
.ndo_get_stats = eip_get_stats,
.ndo_set_multicast_list = eip_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = eip_poll,

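Most hunks in this merge wire the same generic helpers into each driver's net_device_ops. For context, a paraphrased sketch of what the eth_mac_addr() helper does in this kernel era — see net/ethernet/eth.c for the authoritative version; this is not the verbatim source:

	int eth_mac_addr(struct net_device *dev, void *p)
	{
		struct sockaddr *addr = p;

		if (netif_running(dev))
			return -EBUSY;
		if (!is_valid_ether_addr(addr->sa_data))
			return -EADDRNOTAVAIL;
		memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
		return 0;
	}

It validates the requested address and copies it into dev->dev_addr, which is why it is a safe default for hardware whose transmit path reads the MAC from that field.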

@ -3109,6 +3109,8 @@ static void acpi_set_WOL(struct net_device *dev)
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
device_set_wakeup_enable(vp->gendev, vp->enable_wol);
if (vp->enable_wol) {
/* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
EL3WINDOW(7);


@ -1821,6 +1821,7 @@ static const struct net_device_ops cp_netdev_ops = {
.ndo_open = cp_open,
.ndo_stop = cp_close,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_multicast_list = cp_set_rx_mode,
.ndo_get_stats = cp_get_stats,
.ndo_do_ioctl = cp_ioctl,
@ -1832,6 +1833,7 @@ static const struct net_device_ops cp_netdev_ops = {
#ifdef BROKEN
.ndo_change_mtu = cp_change_mtu,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cp_poll_controller,
#endif


@ -917,6 +917,7 @@ static const struct net_device_ops rtl8139_netdev_ops = {
.ndo_stop = rtl8139_close,
.ndo_get_stats = rtl8139_get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_start_xmit = rtl8139_start_xmit,
.ndo_set_multicast_list = rtl8139_set_rx_mode,
.ndo_do_ioctl = netdev_ioctl,
@ -924,7 +925,6 @@ static const struct net_device_ops rtl8139_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = rtl8139_poll_controller,
#endif
};
static int __devinit rtl8139_init_one (struct pci_dev *pdev,


@ -63,6 +63,7 @@ const struct net_device_ops ei_netdev_ops = {
.ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll,


@ -68,6 +68,7 @@ const struct net_device_ops eip_netdev_ops = {
.ndo_get_stats = eip_get_stats,
.ndo_set_multicast_list = eip_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = eip_poll,


@ -1600,7 +1600,7 @@ config 8139_OLD_RX_RESET
old RX-reset behavior. If unsure, say N.
config R6040
tristate "RDC R6040 Fast Ethernet Adapter support (EXPERIMENTAL)"
tristate "RDC R6040 Fast Ethernet Adapter support"
depends on NET_PCI && PCI
select CRC32
select MII


@ -460,6 +460,7 @@ static const struct net_device_ops ace_netdev_ops = {
.ndo_get_stats = ace_get_stats,
.ndo_start_xmit = ace_start_xmit,
.ndo_set_multicast_list = ace_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ace_set_mac_addr,
.ndo_change_mtu = ace_change_mtu,
#if ACENIC_DO_VLAN


@ -646,6 +646,7 @@ static const struct net_device_ops etherh_netdev_ops = {
.ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll,


@ -1357,6 +1357,7 @@ static const struct net_device_ops ks8695_netdev_ops = {
.ndo_start_xmit = ks8695_start_xmit,
.ndo_tx_timeout = ks8695_timeout,
.ndo_set_mac_address = ks8695_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_multicast_list = ks8695_set_multicast,
};


@ -73,8 +73,8 @@
(BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
#define RX_PKT_OFFSET 30
#define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET + 64)
#define RX_PKT_OFFSET (RX_HEADER_LEN + 2)
#define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET)
/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
@ -682,7 +682,6 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
}
rh = (struct rx_header *) skb->data;
skb_reserve(skb, RX_PKT_OFFSET);
rh->len = 0;
rh->flags = 0;
@ -693,13 +692,13 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
if (src_map != NULL)
src_map->skb = NULL;
ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - RX_PKT_OFFSET));
ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
if (dest_idx == (B44_RX_RING_SIZE - 1))
ctrl |= DESC_CTRL_EOT;
dp = &bp->rx_ring[dest_idx];
dp->ctrl = cpu_to_le32(ctrl);
dp->addr = cpu_to_le32((u32) mapping + RX_PKT_OFFSET + bp->dma_offset);
dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);
if (bp->flags & B44_FLAG_RX_RING_HACK)
b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
@ -809,8 +808,8 @@ static int b44_rx(struct b44 *bp, int budget)
ssb_dma_unmap_single(bp->sdev, map,
skb_size, DMA_FROM_DEVICE);
/* Leave out rx_header */
skb_put(skb, len + RX_PKT_OFFSET);
skb_pull(skb, RX_PKT_OFFSET);
} else {
struct sk_buff *copy_skb;

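A quick sanity check on the RX_PKT_OFFSET change above, assuming RX_HEADER_LEN is 28 as defined in b44.h (the 28-byte rx_header plus 2 bytes of alignment padding):

	#define RX_HEADER_LEN  28                      /* assumed value from b44.h */
	#define RX_PKT_OFFSET  (RX_HEADER_LEN + 2)     /* = 30, same as the old literal */
	#define RX_PKT_BUF_SZ  (1536 + RX_PKT_OFFSET)  /* = 1566; was 1536 + 30 + 64 = 1630 */

The offset itself is unchanged; the patch documents where the 30 comes from and drops the extra 64 bytes of slack from each receive buffer.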

@ -50,12 +50,17 @@ struct vlan_group;
struct adapter;
struct sge_qset;
enum { /* rx_offload flags */
T3_RX_CSUM = 1 << 0,
T3_LRO = 1 << 1,
};
struct port_info {
struct adapter *adapter;
struct vlan_group *vlan_grp;
struct sge_qset *qs;
u8 port_id;
u8 rx_csum_offload;
u8 rx_offload;
u8 nqsets;
u8 first_qset;
struct cphy phy;

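The hunk above folds the u8 rx_csum_offload field and the LRO state into a single rx_offload bitmask. A minimal standalone illustration of the flag arithmetic used throughout the remaining cxgb3 hunks (the enum mirrors the driver; the program itself is hypothetical):

	#include <stdio.h>

	enum { /* mirrors the new rx_offload flags */
		T3_RX_CSUM = 1 << 0,
		T3_LRO     = 1 << 1,
	};

	int main(void)
	{
		unsigned char rx_offload = T3_RX_CSUM | T3_LRO; /* init_one() default */

		/* set_rx_csum(dev, 0): turning off RX checksumming must also drop
		 * LRO, because LRO relies on valid hardware checksums. */
		rx_offload &= ~(T3_RX_CSUM | T3_LRO);

		printf("csum=%d lro=%d\n",
		       !!(rx_offload & T3_RX_CSUM), !!(rx_offload & T3_LRO));
		return 0;
	}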

@ -546,7 +546,7 @@ static int setup_sge_qsets(struct adapter *adap)
pi->qs = &adap->sge.qs[pi->first_qset];
for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
++j, ++qset_idx) {
set_qset_lro(dev, qset_idx, pi->rx_csum_offload);
set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
err = t3_sge_alloc_qset(adap, qset_idx, 1,
(adap->flags & USING_MSIX) ? qset_idx + 1 :
irq_idx,
@ -1657,17 +1657,19 @@ static u32 get_rx_csum(struct net_device *dev)
{
struct port_info *p = netdev_priv(dev);
return p->rx_csum_offload;
return p->rx_offload & T3_RX_CSUM;
}
static int set_rx_csum(struct net_device *dev, u32 data)
{
struct port_info *p = netdev_priv(dev);
p->rx_csum_offload = data;
if (!data) {
if (data) {
p->rx_offload |= T3_RX_CSUM;
} else {
int i;
p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
set_qset_lro(dev, i, 0);
}
@ -1830,15 +1832,18 @@ static int cxgb3_set_flags(struct net_device *dev, u32 data)
int i;
if (data & ETH_FLAG_LRO) {
if (!pi->rx_csum_offload)
if (!(pi->rx_offload & T3_RX_CSUM))
return -EINVAL;
pi->rx_offload |= T3_LRO;
for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
set_qset_lro(dev, i, 1);
} else
} else {
pi->rx_offload &= ~T3_LRO;
for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
set_qset_lro(dev, i, 0);
}
return 0;
}
@ -1926,7 +1931,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
pi = adap2pinfo(adapter, i);
if (t.qset_idx >= pi->first_qset &&
t.qset_idx < pi->first_qset + pi->nqsets &&
!pi->rx_csum_offload)
!(pi->rx_offload & T3_RX_CSUM))
return -EINVAL;
}
@ -2946,7 +2951,7 @@ static int __devinit init_one(struct pci_dev *pdev,
adapter->port[i] = netdev;
pi = netdev_priv(netdev);
pi->adapter = adapter;
pi->rx_csum_offload = 1;
pi->rx_offload = T3_RX_CSUM | T3_LRO;
pi->port_id = i;
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
@ -2955,6 +2960,7 @@ static int __devinit init_one(struct pci_dev *pdev,
netdev->mem_end = mmio_start + mmio_len - 1;
netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
netdev->features |= NETIF_F_LLTX;
netdev->features |= NETIF_F_LRO;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;


@ -1932,7 +1932,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
skb_pull(skb, sizeof(*p) + pad);
skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
pi = netdev_priv(skb->dev);
if (pi->rx_csum_offload && p->csum_valid && p->csum == htons(0xffff) &&
if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid && p->csum == htons(0xffff) &&
!p->fragment) {
qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
skb->ip_summed = CHECKSUM_UNNECESSARY;


@ -390,7 +390,8 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
}
static DEFINE_MUTEX(nvm_mutex);
static pid_t nvm_owner = -1;
static pid_t nvm_owner_pid = -1;
static char nvm_owner_name[TASK_COMM_LEN] = "";
/**
* e1000_acquire_swflag_ich8lan - Acquire software control flag
@ -408,11 +409,15 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
might_sleep();
if (!mutex_trylock(&nvm_mutex)) {
WARN(1, KERN_ERR "e1000e mutex contention. Owned by pid %d\n",
nvm_owner);
WARN(1, KERN_ERR "e1000e mutex contention. Owned by process "
"%s (pid %d), required by process %s (pid %d)\n",
nvm_owner_name, nvm_owner_pid,
current->comm, current->pid);
mutex_lock(&nvm_mutex);
}
nvm_owner = current->pid;
nvm_owner_pid = current->pid;
strncpy(nvm_owner_name, current->comm, TASK_COMM_LEN);
while (timeout) {
extcnf_ctrl = er32(EXTCNF_CTRL);
@ -430,7 +435,8 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
hw_dbg(hw, "FW or HW has locked the resource for too long.\n");
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
ew32(EXTCNF_CTRL, extcnf_ctrl);
nvm_owner = -1;
nvm_owner_pid = -1;
strcpy(nvm_owner_name, "");
mutex_unlock(&nvm_mutex);
return -E1000_ERR_CONFIG;
}
@ -454,7 +460,8 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
ew32(EXTCNF_CTRL, extcnf_ctrl);
nvm_owner = -1;
nvm_owner_pid = -1;
strcpy(nvm_owner_name, "");
mutex_unlock(&nvm_mutex);
}


@ -169,6 +169,7 @@ static const struct net_device_ops e21_netdev_ops = {
.ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll,


@ -1599,6 +1599,7 @@ static const struct net_device_ops enic_netdev_ops = {
.ndo_start_xmit = enic_hard_start_xmit,
.ndo_get_stats = enic_get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_multicast_list = enic_set_multicast_list,
.ndo_change_mtu = enic_change_mtu,
.ndo_vlan_rx_register = enic_vlan_rx_register,


@ -13,7 +13,7 @@
* Copyright (C) 2004 Andrew de Quincey (wol support)
* Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
* IRQ rate fixes, bigendian fixes, cleanups, verification)
* Copyright (c) 2004,2005,2006,2007,2008 NVIDIA Corporation
* Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -39,7 +39,7 @@
* DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
* superfluous timer interrupts from the nic.
*/
#define FORCEDETH_VERSION "0.61"
#define FORCEDETH_VERSION "0.62"
#define DRV_NAME "forcedeth"
#include <linux/module.h>
@ -2096,14 +2096,15 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
}
spin_lock_irqsave(&np->lock, flags);
empty_slots = nv_get_empty_tx_slots(np);
if (unlikely(empty_slots <= entries)) {
spin_lock_irqsave(&np->lock, flags);
netif_stop_queue(dev);
np->tx_stop = 1;
spin_unlock_irqrestore(&np->lock, flags);
return NETDEV_TX_BUSY;
}
spin_unlock_irqrestore(&np->lock, flags);
start_tx = put_tx = np->put_tx.orig;
@ -2214,14 +2215,15 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
}
spin_lock_irqsave(&np->lock, flags);
empty_slots = nv_get_empty_tx_slots(np);
if (unlikely(empty_slots <= entries)) {
spin_lock_irqsave(&np->lock, flags);
netif_stop_queue(dev);
np->tx_stop = 1;
spin_unlock_irqrestore(&np->lock, flags);
return NETDEV_TX_BUSY;
}
spin_unlock_irqrestore(&np->lock, flags);
start_tx = put_tx = np->put_tx.ex;
start_tx_ctx = np->put_tx_ctx;
@ -3403,10 +3405,10 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
#ifdef CONFIG_FORCEDETH_NAPI
if (events & NVREG_IRQ_RX_ALL) {
spin_lock(&np->lock);
netif_rx_schedule(&np->napi);
/* Disable furthur receive irq's */
spin_lock(&np->lock);
np->irqmask &= ~NVREG_IRQ_RX_ALL;
if (np->msi_flags & NV_MSI_X_ENABLED)
@ -3520,10 +3522,10 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
#ifdef CONFIG_FORCEDETH_NAPI
if (events & NVREG_IRQ_RX_ALL) {
spin_lock(&np->lock);
netif_rx_schedule(&np->napi);
/* Disable furthur receive irq's */
spin_lock(&np->lock);
np->irqmask &= ~NVREG_IRQ_RX_ALL;
if (np->msi_flags & NV_MSI_X_ENABLED)
@ -6167,19 +6169,19 @@ static struct pci_device_id pci_tbl[] = {
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
},
{0,},
};

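To untangle the interleaved xmit-lock hunks above: the fix moves the free-slot calculation under np->lock so the tx-completion path cannot race it and stop the queue spuriously. The resulting shape, as a sketch rather than the driver's exact code:

	spin_lock_irqsave(&np->lock, flags);
	empty_slots = nv_get_empty_tx_slots(np);   /* now computed under the lock */
	if (unlikely(empty_slots <= entries)) {
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irqrestore(&np->lock, flags);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&np->lock, flags);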

@ -296,6 +296,20 @@ err_out:
return err;
}
/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct gfar_private *priv = netdev_priv(dev);
if (!netif_running(dev))
return -EINVAL;
if (!priv->phydev)
return -ENODEV;
return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
}
/* Set up the ethernet device structure, private data,
* and anything else we need before we start */
static int gfar_probe(struct of_device *ofdev,
@ -366,6 +380,7 @@ static int gfar_probe(struct of_device *ofdev,
dev->set_multicast_list = gfar_set_multi;
dev->ethtool_ops = &gfar_ethtool_ops;
dev->do_ioctl = gfar_ioctl;
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
priv->rx_csum_enable = 1;


@ -576,6 +576,7 @@ static const struct net_device_ops hamachi_netdev_ops = {
.ndo_set_multicast_list = set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_tx_timeout = hamachi_tx_timeout,
.ndo_do_ioctl = netdev_ioctl,
};


@ -717,11 +717,12 @@ static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct sixpack *sp = sp_get(tty);
struct net_device *dev = sp->dev;
struct net_device *dev;
unsigned int tmp, err;
if (!sp)
return -ENXIO;
dev = sp->dev;
switch(cmd) {
case SIOCGIFNAME:

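The 6pack hunk above — and the libertas lbs_stop_card() hunk later in this merge — fix the same bug pattern: an initializer dereferences a pointer before the NULL check runs. Reduced to a sketch:

	struct sixpack *sp = sp_get(tty);
	struct net_device *dev = sp->dev;   /* oops: dereferenced before the check */

	if (!sp)
		return -ENXIO;

The fix leaves dev uninitialized at its declaration and assigns it only after the check has passed.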

@ -166,6 +166,7 @@ static const struct net_device_ops hpp_netdev_ops = {
.ndo_get_stats = eip_get_stats,
.ndo_set_multicast_list = eip_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = eip_poll,


@ -103,6 +103,7 @@ static const struct net_device_ops hydra_netdev_ops = {
.ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll,


@ -594,7 +594,7 @@ static int au1k_irda_rx(struct net_device *dev)
update_rx_stats(dev, flags, count);
skb=alloc_skb(count+1,GFP_ATOMIC);
if (skb == NULL) {
aup->stats.rx_dropped++;
aup->netdev->stats.rx_dropped++;
continue;
}
skb_reserve(skb, 1);


@ -1194,13 +1194,13 @@ toshoboe_interrupt (int irq, void *dev_id)
txp = txpc;
txpc++;
txpc %= TX_SLOTS;
self->stats.tx_packets++;
self->netdev->stats.tx_packets++;
if (self->ring->tx[txpc].control & OBOE_CTL_TX_HW_OWNS)
self->ring->tx[txp].control &= ~OBOE_CTL_TX_RTCENTX;
}
self->stats.tx_packets--;
self->netdev->stats.tx_packets--;
#else
self->stats.tx_packets++;
self->netdev->stats.tx_packets++;
#endif
toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
}
@ -1280,7 +1280,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
skb_put (skb, len);
skb_copy_to_linear_data(skb, self->rx_bufs[self->rxs],
len);
self->stats.rx_packets++;
self->netdev->stats.rx_packets++;
skb->dev = self->netdev;
skb_reset_mac_header(skb);
skb->protocol = htons (ETH_P_IRDA);


@ -486,6 +486,7 @@ static const struct net_device_ops mac8390_netdev_ops = {
.ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll,


@ -952,6 +952,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_get_stats = mlx4_en_get_stats,
.ndo_set_multicast_list = mlx4_en_set_multicast,
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = mlx4_en_change_mtu,
.ndo_tx_timeout = mlx4_en_tx_timeout,
.ndo_vlan_rx_register = mlx4_en_vlan_rx_register,


@ -202,6 +202,7 @@ static const struct net_device_ops ne_netdev_ops = {
.ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll,


@ -208,6 +208,7 @@ static const struct net_device_ops ne2k_netdev_ops = {
.ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll,


@ -1956,6 +1956,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_change_mtu = ns83820_change_mtu,
.ndo_set_multicast_list = ns83820_set_multicast,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_tx_timeout = ns83820_tx_timeout,
#ifdef NS83820_VLAN_ACCEL_SUPPORT
.ndo_vlan_rx_register = ns83820_vlan_rx_register,


@ -28,11 +28,11 @@
} while (0)
#define QLGE_VENDOR_ID 0x1077
#define QLGE_DEVICE_ID1 0x8012
#define QLGE_DEVICE_ID 0x8000
#define QLGE_DEVICE_ID 0x8012
#define MAX_RX_RINGS 128
#define MAX_TX_RINGS 128
#define MAX_CPUS 8
#define MAX_TX_RINGS MAX_CPUS
#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)
#define NUM_TX_RING_ENTRIES 256
#define NUM_RX_RING_ENTRIES 256
@ -45,6 +45,7 @@
#define MAX_SPLIT_SIZE 1023
#define QLGE_SB_PAD 32
#define MAX_CQ 128
#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
#define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */
#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
@ -961,8 +962,7 @@ struct ib_mac_iocb_rsp {
#define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */
#define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */
__le32 data_len; /* */
__le32 data_addr_lo; /* */
__le32 data_addr_hi; /* */
__le64 data_addr; /* */
__le32 rss; /* */
__le16 vlan_id; /* 12 bits */
#define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */
@ -976,8 +976,7 @@ struct ib_mac_iocb_rsp {
#define IB_MAC_IOCB_RSP_HS 0x40
#define IB_MAC_IOCB_RSP_HL 0x80
__le32 hdr_len; /* */
__le32 hdr_addr_lo; /* */
__le32 hdr_addr_hi; /* */
__le64 hdr_addr; /* */
} __attribute((packed));
struct ib_ae_iocb_rsp {
@ -1042,10 +1041,8 @@ struct wqicb {
__le16 cq_id_rss;
#define Q_CQ_ID_RSS_RV 0x8000
__le16 rid;
__le32 addr_lo;
__le32 addr_hi;
__le32 cnsmr_idx_addr_lo;
__le32 cnsmr_idx_addr_hi;
__le64 addr;
__le64 cnsmr_idx_addr;
} __attribute((packed));
/*
@ -1070,18 +1067,14 @@ struct cqicb {
#define LEN_CPP_64 0x0002
#define LEN_CPP_128 0x0003
__le16 rid;
__le32 addr_lo;
__le32 addr_hi;
__le32 prod_idx_addr_lo;
__le32 prod_idx_addr_hi;
__le64 addr;
__le64 prod_idx_addr;
__le16 pkt_delay;
__le16 irq_delay;
__le32 lbq_addr_lo;
__le32 lbq_addr_hi;
__le64 lbq_addr;
__le16 lbq_buf_size;
__le16 lbq_len; /* entry count */
__le32 sbq_addr_lo;
__le32 sbq_addr_hi;
__le64 sbq_addr;
__le16 sbq_buf_size;
__le16 sbq_len; /* entry count */
} __attribute((packed));
@ -1145,7 +1138,7 @@ struct tx_ring {
struct wqicb wqicb; /* structure used to inform chip of new queue */
void *wq_base; /* pci_alloc:virtual addr for tx */
dma_addr_t wq_base_dma; /* pci_alloc:dma addr for tx */
u32 *cnsmr_idx_sh_reg; /* shadow copy of consumer idx */
__le32 *cnsmr_idx_sh_reg; /* shadow copy of consumer idx */
dma_addr_t cnsmr_idx_sh_reg_dma; /* dma-shadow copy of consumer */
u32 wq_size; /* size in bytes of queue area */
u32 wq_len; /* number of entries in queue */
@ -1181,7 +1174,7 @@ struct rx_ring {
u32 cq_size;
u32 cq_len;
u16 cq_id;
volatile __le32 *prod_idx_sh_reg; /* Shadowed producer register. */
__le32 *prod_idx_sh_reg; /* Shadowed producer register. */
dma_addr_t prod_idx_sh_reg_dma;
void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */
u32 cnsmr_idx; /* current sw idx */
@ -1402,9 +1395,11 @@ struct ql_adapter {
int rx_ring_count;
int ring_mem_size;
void *ring_mem;
struct rx_ring *rx_ring;
struct rx_ring rx_ring[MAX_RX_RINGS];
struct tx_ring tx_ring[MAX_TX_RINGS];
int rx_csum;
struct tx_ring *tx_ring;
u32 default_rx_queue;
u16 rx_coalesce_usecs; /* cqicb->int_delay */
@ -1459,6 +1454,24 @@ static inline void ql_write_db_reg(u32 val, void __iomem *addr)
mmiowb();
}
/*
* Shadow Registers:
* Outbound queues have a consumer index that is maintained by the chip.
* Inbound queues have a producer index that is maintained by the chip.
* For lower overhead, these registers are "shadowed" to host memory
* which allows the device driver to track the queue progress without
* PCI reads. When an entry is placed on an inbound queue, the chip will
* update the relevant index register and then copy the value to the
* shadow register in host memory.
*/
static inline u32 ql_read_sh_reg(__le32 *addr)
{
u32 reg;
reg = le32_to_cpu(*addr);
rmb();
return reg;
}
extern char qlge_driver_name[];
extern const char qlge_driver_version[];
extern const struct ethtool_ops qlge_ethtool_ops;

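The consumers of ql_read_sh_reg() appear in the qlge_main.c hunks further down; the recurring pattern is a completion-poll loop that tracks the chip-updated producer index from host memory instead of issuing PCI reads. As a sketch:

	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);

	while (prod != rx_ring->cnsmr_idx) {
		/* ... process one completion entry ... */
		ql_update_cq(rx_ring);      /* advance the software consumer index */
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}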

@ -435,14 +435,10 @@ void ql_dump_wqicb(struct wqicb *wqicb)
printk(KERN_ERR PFX "wqicb->cq_id_rss = %d.\n",
le16_to_cpu(wqicb->cq_id_rss));
printk(KERN_ERR PFX "wqicb->rid = 0x%x.\n", le16_to_cpu(wqicb->rid));
printk(KERN_ERR PFX "wqicb->wq_addr_lo = 0x%.08x.\n",
le32_to_cpu(wqicb->addr_lo));
printk(KERN_ERR PFX "wqicb->wq_addr_hi = 0x%.08x.\n",
le32_to_cpu(wqicb->addr_hi));
printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr_lo = 0x%.08x.\n",
le32_to_cpu(wqicb->cnsmr_idx_addr_lo));
printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr_hi = 0x%.08x.\n",
le32_to_cpu(wqicb->cnsmr_idx_addr_hi));
printk(KERN_ERR PFX "wqicb->wq_addr = 0x%llx.\n",
(unsigned long long) le64_to_cpu(wqicb->addr));
printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr = 0x%llx.\n",
(unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
}
void ql_dump_tx_ring(struct tx_ring *tx_ring)
@ -455,10 +451,11 @@ void ql_dump_tx_ring(struct tx_ring *tx_ring)
printk(KERN_ERR PFX "tx_ring->base = %p.\n", tx_ring->wq_base);
printk(KERN_ERR PFX "tx_ring->base_dma = 0x%llx.\n",
(unsigned long long) tx_ring->wq_base_dma);
printk(KERN_ERR PFX "tx_ring->cnsmr_idx_sh_reg = %p.\n",
tx_ring->cnsmr_idx_sh_reg);
printk(KERN_ERR PFX "tx_ring->cnsmr_idx_sh_reg_dma = 0x%llx.\n",
(unsigned long long) tx_ring->cnsmr_idx_sh_reg_dma);
printk(KERN_ERR PFX
"tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d.\n",
tx_ring->cnsmr_idx_sh_reg,
tx_ring->cnsmr_idx_sh_reg
? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
printk(KERN_ERR PFX "tx_ring->size = %d.\n", tx_ring->wq_size);
printk(KERN_ERR PFX "tx_ring->len = %d.\n", tx_ring->wq_len);
printk(KERN_ERR PFX "tx_ring->prod_idx_db_reg = %p.\n",
@ -510,30 +507,22 @@ void ql_dump_cqicb(struct cqicb *cqicb)
printk(KERN_ERR PFX "cqicb->msix_vect = %d.\n", cqicb->msix_vect);
printk(KERN_ERR PFX "cqicb->flags = %x.\n", cqicb->flags);
printk(KERN_ERR PFX "cqicb->len = %d.\n", le16_to_cpu(cqicb->len));
printk(KERN_ERR PFX "cqicb->addr_lo = %x.\n",
le32_to_cpu(cqicb->addr_lo));
printk(KERN_ERR PFX "cqicb->addr_hi = %x.\n",
le32_to_cpu(cqicb->addr_hi));
printk(KERN_ERR PFX "cqicb->prod_idx_addr_lo = %x.\n",
le32_to_cpu(cqicb->prod_idx_addr_lo));
printk(KERN_ERR PFX "cqicb->prod_idx_addr_hi = %x.\n",
le32_to_cpu(cqicb->prod_idx_addr_hi));
printk(KERN_ERR PFX "cqicb->addr = 0x%llx.\n",
(unsigned long long) le64_to_cpu(cqicb->addr));
printk(KERN_ERR PFX "cqicb->prod_idx_addr = 0x%llx.\n",
(unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
printk(KERN_ERR PFX "cqicb->pkt_delay = 0x%.04x.\n",
le16_to_cpu(cqicb->pkt_delay));
printk(KERN_ERR PFX "cqicb->irq_delay = 0x%.04x.\n",
le16_to_cpu(cqicb->irq_delay));
printk(KERN_ERR PFX "cqicb->lbq_addr_lo = %x.\n",
le32_to_cpu(cqicb->lbq_addr_lo));
printk(KERN_ERR PFX "cqicb->lbq_addr_hi = %x.\n",
le32_to_cpu(cqicb->lbq_addr_hi));
printk(KERN_ERR PFX "cqicb->lbq_addr = 0x%llx.\n",
(unsigned long long) le64_to_cpu(cqicb->lbq_addr));
printk(KERN_ERR PFX "cqicb->lbq_buf_size = 0x%.04x.\n",
le16_to_cpu(cqicb->lbq_buf_size));
printk(KERN_ERR PFX "cqicb->lbq_len = 0x%.04x.\n",
le16_to_cpu(cqicb->lbq_len));
printk(KERN_ERR PFX "cqicb->sbq_addr_lo = %x.\n",
le32_to_cpu(cqicb->sbq_addr_lo));
printk(KERN_ERR PFX "cqicb->sbq_addr_hi = %x.\n",
le32_to_cpu(cqicb->sbq_addr_hi));
printk(KERN_ERR PFX "cqicb->sbq_addr = 0x%llx.\n",
(unsigned long long) le64_to_cpu(cqicb->sbq_addr));
printk(KERN_ERR PFX "cqicb->sbq_buf_size = 0x%.04x.\n",
le16_to_cpu(cqicb->sbq_buf_size));
printk(KERN_ERR PFX "cqicb->sbq_len = 0x%.04x.\n",
@ -558,9 +547,10 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring)
printk(KERN_ERR PFX "rx_ring->cq_size = %d.\n", rx_ring->cq_size);
printk(KERN_ERR PFX "rx_ring->cq_len = %d.\n", rx_ring->cq_len);
printk(KERN_ERR PFX
"rx_ring->prod_idx_sh_reg, addr = %p, value = %d.\n",
"rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d.\n",
rx_ring->prod_idx_sh_reg,
rx_ring->prod_idx_sh_reg ? *(rx_ring->prod_idx_sh_reg) : 0);
rx_ring->prod_idx_sh_reg
? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
printk(KERN_ERR PFX "rx_ring->prod_idx_sh_reg_dma = %llx.\n",
(unsigned long long) rx_ring->prod_idx_sh_reg_dma);
printk(KERN_ERR PFX "rx_ring->cnsmr_idx_db_reg = %p.\n",
@ -809,10 +799,8 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
printk(KERN_ERR PFX "data_len = %d\n",
le32_to_cpu(ib_mac_rsp->data_len));
printk(KERN_ERR PFX "data_addr_hi = 0x%x\n",
le32_to_cpu(ib_mac_rsp->data_addr_hi));
printk(KERN_ERR PFX "data_addr_lo = 0x%x\n",
le32_to_cpu(ib_mac_rsp->data_addr_lo));
printk(KERN_ERR PFX "data_addr = 0x%llx\n",
(unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
printk(KERN_ERR PFX "rss = %x\n",
le32_to_cpu(ib_mac_rsp->rss));
@ -828,10 +816,8 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
printk(KERN_ERR PFX "hdr length = %d.\n",
le32_to_cpu(ib_mac_rsp->hdr_len));
printk(KERN_ERR PFX "hdr addr_hi = 0x%x.\n",
le32_to_cpu(ib_mac_rsp->hdr_addr_hi));
printk(KERN_ERR PFX "hdr addr_lo = 0x%x.\n",
le32_to_cpu(ib_mac_rsp->hdr_addr_lo));
printk(KERN_ERR PFX "hdr addr = 0x%llx.\n",
(unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr));
}
}
#endif


@ -76,7 +76,6 @@ MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID1)},
/* required last entry */
{0,}
};
@ -127,12 +126,12 @@ static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
unsigned int seconds = 3;
unsigned int wait_count = 30;
do {
if (!ql_sem_trylock(qdev, sem_mask))
return 0;
ssleep(1);
} while (--seconds);
udelay(100);
} while (--wait_count);
return -ETIMEDOUT;
}
@ -1545,7 +1544,7 @@ static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
struct ql_adapter *qdev = rx_ring->qdev;
u32 prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg);
u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
struct ob_mac_iocb_rsp *net_rsp = NULL;
int count = 0;
@ -1571,7 +1570,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
}
count++;
ql_update_cq(rx_ring);
prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg);
prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
}
ql_write_cq_idx(rx_ring);
if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
@ -1591,7 +1590,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
struct ql_adapter *qdev = rx_ring->qdev;
u32 prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg);
u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
struct ql_net_rsp_iocb *net_rsp;
int count = 0;
@ -1624,7 +1623,7 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
}
count++;
ql_update_cq(rx_ring);
prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg);
prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
if (count == budget)
break;
}
@ -1787,7 +1786,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
* Check the default queue and wake handler if active.
*/
rx_ring = &qdev->rx_ring[0];
if (le32_to_cpu(*rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
ql_disable_completion_interrupt(qdev, intr_context->intr);
queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
@ -1801,7 +1800,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
*/
for (i = 1; i < qdev->rx_ring_count; i++) {
rx_ring = &qdev->rx_ring[i];
if (le32_to_cpu(*rx_ring->prod_idx_sh_reg) !=
if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
rx_ring->cnsmr_idx) {
QPRINTK(qdev, INTR, INFO,
"Waking handler for rx_ring[%d].\n", i);
@ -2356,28 +2355,6 @@ static void ql_tx_ring_clean(struct ql_adapter *qdev)
}
}
static void ql_free_ring_cb(struct ql_adapter *qdev)
{
kfree(qdev->ring_mem);
}
static int ql_alloc_ring_cb(struct ql_adapter *qdev)
{
/* Allocate space for tx/rx ring control blocks. */
qdev->ring_mem_size =
(qdev->tx_ring_count * sizeof(struct tx_ring)) +
(qdev->rx_ring_count * sizeof(struct rx_ring));
qdev->ring_mem = kmalloc(qdev->ring_mem_size, GFP_KERNEL);
if (qdev->ring_mem == NULL) {
return -ENOMEM;
} else {
qdev->rx_ring = qdev->ring_mem;
qdev->tx_ring = qdev->ring_mem +
(qdev->rx_ring_count * sizeof(struct rx_ring));
}
return 0;
}
static void ql_free_mem_resources(struct ql_adapter *qdev)
{
int i;
@ -2467,12 +2444,9 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
cqicb->addr_lo = cpu_to_le32(rx_ring->cq_base_dma);
cqicb->addr_hi = cpu_to_le32((u64) rx_ring->cq_base_dma >> 32);
cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
cqicb->prod_idx_addr_lo = cpu_to_le32(rx_ring->prod_idx_sh_reg_dma);
cqicb->prod_idx_addr_hi =
cpu_to_le32((u64) rx_ring->prod_idx_sh_reg_dma >> 32);
cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
/*
* Set up the control block load flags.
@ -2483,10 +2457,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
if (rx_ring->lbq_len) {
cqicb->flags |= FLAGS_LL; /* Load lbq values */
*((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma;
cqicb->lbq_addr_lo =
cpu_to_le32(rx_ring->lbq_base_indirect_dma);
cqicb->lbq_addr_hi =
cpu_to_le32((u64) rx_ring->lbq_base_indirect_dma >> 32);
cqicb->lbq_addr =
cpu_to_le64(rx_ring->lbq_base_indirect_dma);
bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
(u16) rx_ring->lbq_buf_size;
cqicb->lbq_buf_size = cpu_to_le16(bq_len);
@ -2501,10 +2473,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
if (rx_ring->sbq_len) {
cqicb->flags |= FLAGS_LS; /* Load sbq values */
*((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma;
cqicb->sbq_addr_lo =
cpu_to_le32(rx_ring->sbq_base_indirect_dma);
cqicb->sbq_addr_hi =
cpu_to_le32((u64) rx_ring->sbq_base_indirect_dma >> 32);
cqicb->sbq_addr =
cpu_to_le64(rx_ring->sbq_base_indirect_dma);
cqicb->sbq_buf_size =
cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
bq_len = (rx_ring->sbq_len == 65536) ? 0 :
@ -2611,12 +2581,9 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
wqicb->rid = 0;
wqicb->addr_lo = cpu_to_le32(tx_ring->wq_base_dma);
wqicb->addr_hi = cpu_to_le32((u64) tx_ring->wq_base_dma >> 32);
wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
wqicb->cnsmr_idx_addr_lo = cpu_to_le32(tx_ring->cnsmr_idx_sh_reg_dma);
wqicb->cnsmr_idx_addr_hi =
cpu_to_le32((u64) tx_ring->cnsmr_idx_sh_reg_dma >> 32);
wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
ql_init_tx_ring(qdev, tx_ring);
@ -2746,14 +2713,14 @@ static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
* Outbound queue is for outbound completions only.
*/
intr_context->handler = qlge_msix_tx_isr;
sprintf(intr_context->name, "%s-txq-%d",
sprintf(intr_context->name, "%s-tx-%d",
qdev->ndev->name, i);
} else {
/*
* Inbound queues handle unicast frames only.
*/
intr_context->handler = qlge_msix_rx_isr;
sprintf(intr_context->name, "%s-rxq-%d",
sprintf(intr_context->name, "%s-rx-%d",
qdev->ndev->name, i);
}
}
@ -3247,7 +3214,6 @@ static int qlge_close(struct net_device *ndev)
msleep(1);
ql_adapter_down(qdev);
ql_release_adapter_resources(qdev);
ql_free_ring_cb(qdev);
return 0;
}
@ -3273,8 +3239,8 @@ static int ql_configure_rings(struct ql_adapter *qdev)
* This limitation can be removed when requested.
*/
if (cpu_cnt > 8)
cpu_cnt = 8;
if (cpu_cnt > MAX_CPUS)
cpu_cnt = MAX_CPUS;
/*
* rx_ring[0] is always the default queue.
@ -3294,9 +3260,6 @@ static int ql_configure_rings(struct ql_adapter *qdev)
*/
qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
if (ql_alloc_ring_cb(qdev))
return -ENOMEM;
for (i = 0; i < qdev->tx_ring_count; i++) {
tx_ring = &qdev->tx_ring[i];
memset((void *)tx_ring, 0, sizeof(tx_ring));
@ -3393,7 +3356,6 @@ static int qlge_open(struct net_device *ndev)
error_up:
ql_release_adapter_resources(qdev);
ql_free_ring_cb(qdev);
return err;
}


@ -49,8 +49,8 @@
#include <asm/processor.h>
#define DRV_NAME "r6040"
#define DRV_VERSION "0.20"
#define DRV_RELDATE "07Jan2009"
#define DRV_VERSION "0.21"
#define DRV_RELDATE "09Jan2009"
/* PHY CHIP Address */
#define PHY1_ADDR 1 /* For MAC1 */
@ -457,22 +457,12 @@ static void r6040_down(struct net_device *dev)
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
free_irq(dev->irq, dev);
/* Free RX buffer */
r6040_free_rxbufs(dev);
/* Free TX buffer */
r6040_free_txbufs(dev);
/* Free Descriptor memory */
pci_free_consistent(pdev, RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma);
pci_free_consistent(pdev, TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma);
}
static int r6040_close(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
struct pci_dev *pdev = lp->pdev;
/* deleted timer */
del_timer_sync(&lp->timer);
@ -481,8 +471,28 @@ static int r6040_close(struct net_device *dev)
napi_disable(&lp->napi);
netif_stop_queue(dev);
r6040_down(dev);
free_irq(dev->irq, dev);
/* Free RX buffer */
r6040_free_rxbufs(dev);
/* Free TX buffer */
r6040_free_txbufs(dev);
spin_unlock_irq(&lp->lock);
/* Free Descriptor memory */
if (lp->rx_ring) {
pci_free_consistent(pdev, RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma);
lp->rx_ring = 0;
}
if (lp->tx_ring) {
pci_free_consistent(pdev, TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma);
lp->tx_ring = 0;
}
return 0;
}
@ -1049,6 +1059,7 @@ static const struct net_device_ops r6040_netdev_ops = {
.ndo_set_multicast_list = r6040_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_do_ioctl = r6040_ioctl,
.ndo_tx_timeout = r6040_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
@ -1143,8 +1154,10 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
/* Some bootloader/BIOSes do not initialize
* MAC address, warn about that */
if (!(adrp[0] || adrp[1] || adrp[2]))
printk(KERN_WARNING DRV_NAME ": MAC address not initialized\n");
if (!(adrp[0] || adrp[1] || adrp[2])) {
printk(KERN_WARNING DRV_NAME ": MAC address not initialized, generating random\n");
random_ether_addr(dev->dev_addr);
}
/* Link new device into r6040_root_dev */
lp->pdev = pdev;


@ -1408,6 +1408,7 @@ static const struct net_device_ops sc92031_netdev_ops = {
.ndo_set_multicast_list = sc92031_set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_tx_timeout = sc92031_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = sc92031_poll_controller,


@ -636,10 +636,11 @@ static void tenxpress_phy_fini(struct efx_nic *efx)
{
int reg;
if (efx->phy_type == PHY_TYPE_SFT9001B) {
if (efx->phy_type == PHY_TYPE_SFT9001B)
device_remove_file(&efx->pci_dev->dev,
&dev_attr_phy_short_reach);
} else {
if (efx->phy_type == PHY_TYPE_SFX7101) {
/* Power down the LNPGA */
reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,


@ -389,6 +389,7 @@ static const struct net_device_ops sis900_netdev_ops = {
.ndo_set_multicast_list = set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_do_ioctl = mii_ioctl,
.ndo_tx_timeout = sis900_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER


@ -192,6 +192,7 @@ static const struct net_device_ops ultramca_netdev_ops = {
.ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll,


@ -196,6 +196,7 @@ static const struct net_device_ops ultra_netdev_ops = {
.ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll,


@ -144,6 +144,7 @@ static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
}
BUG();
return 0;
}
static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg,
@ -1740,6 +1741,7 @@ static const struct net_device_ops smsc911x_netdev_ops = {
.ndo_set_multicast_list = smsc911x_set_multicast_list,
.ndo_do_ioctl = smsc911x_do_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = smsc911x_poll_controller,
#endif
@ -1967,7 +1969,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
retval = request_irq(dev->irq, smsc911x_irqhandler, IRQF_DISABLED,
SMSC_CHIPNAME, dev);
dev->name, dev);
if (retval) {
SMSC_WARNING(PROBE,
"Unable to claim requested irq: %d", dev->irq);


@ -1551,6 +1551,7 @@ static const struct net_device_ops smsc9420_netdev_ops = {
.ndo_set_multicast_list = smsc9420_set_multicast_list,
.ndo_do_ioctl = smsc9420_do_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = smsc9420_poll_controller,
#endif /* CONFIG_NET_POLL_CONTROLLER */


@ -442,40 +442,30 @@ static void magic_packet_detection_enable(struct ucc_geth_private *ugeth)
{
struct ucc_fast_private *uccf;
struct ucc_geth __iomem *ug_regs;
u32 maccfg2, uccm;
uccf = ugeth->uccf;
ug_regs = ugeth->ug_regs;
/* Enable interrupts for magic packet detection */
uccm = in_be32(uccf->p_uccm);
uccm |= UCCE_MPD;
out_be32(uccf->p_uccm, uccm);
setbits32(uccf->p_uccm, UCC_GETH_UCCE_MPD);
/* Enable magic packet detection */
maccfg2 = in_be32(&ug_regs->maccfg2);
maccfg2 |= MACCFG2_MPE;
out_be32(&ug_regs->maccfg2, maccfg2);
setbits32(&ug_regs->maccfg2, MACCFG2_MPE);
}
static void magic_packet_detection_disable(struct ucc_geth_private *ugeth)
{
struct ucc_fast_private *uccf;
struct ucc_geth __iomem *ug_regs;
u32 maccfg2, uccm;
uccf = ugeth->uccf;
ug_regs = ugeth->ug_regs;
/* Disable interrupts for magic packet detection */
uccm = in_be32(uccf->p_uccm);
uccm &= ~UCCE_MPD;
out_be32(uccf->p_uccm, uccm);
clrbits32(uccf->p_uccm, UCC_GETH_UCCE_MPD);
/* Disable magic packet detection */
maccfg2 = in_be32(&ug_regs->maccfg2);
maccfg2 &= ~MACCFG2_MPE;
out_be32(&ug_regs->maccfg2, maccfg2);
clrbits32(&ug_regs->maccfg2, MACCFG2_MPE);
}
#endif /* MAGIC_PACKET */
@ -585,7 +575,8 @@ static void get_statistics(struct ucc_geth_private *ugeth,
/* Hardware only if user handed pointer and driver actually
gathers hardware statistics */
if (hardware_statistics && (in_be32(&uf_regs->upsmr) & UPSMR_HSE)) {
if (hardware_statistics &&
(in_be32(&uf_regs->upsmr) & UCC_GETH_UPSMR_HSE)) {
hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
@ -1181,9 +1172,7 @@ int init_flow_control_params(u32 automatic_flow_control_mode,
out_be32(uempr_register, value);
/* Set UPSMR register */
value = in_be32(upsmr_register);
value |= automatic_flow_control_mode;
out_be32(upsmr_register, value);
setbits32(upsmr_register, automatic_flow_control_mode);
value = in_be32(maccfg1_register);
if (rx_flow_control_enable)
@ -1200,14 +1189,11 @@ static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
u32 __iomem *upsmr_register,
u16 __iomem *uescr_register)
{
u32 upsmr_value = 0;
u16 uescr_value = 0;
/* Enable hardware statistics gathering if requested */
if (enable_hardware_statistics) {
upsmr_value = in_be32(upsmr_register);
upsmr_value |= UPSMR_HSE;
out_be32(upsmr_register, upsmr_value);
}
if (enable_hardware_statistics)
setbits32(upsmr_register, UCC_GETH_UPSMR_HSE);
/* Clear hardware statistics counters */
uescr_value = in_be16(uescr_register);
@ -1233,23 +1219,17 @@ static int init_firmware_statistics_gathering_mode(int
{
/* Note: this function does not check if */
/* the parameters it receives are NULL */
u16 temoder_value;
u32 remoder_value;
if (enable_tx_firmware_statistics) {
out_be32(tx_rmon_base_ptr,
tx_firmware_statistics_structure_address);
temoder_value = in_be16(temoder_register);
temoder_value |= TEMODER_TX_RMON_STATISTICS_ENABLE;
out_be16(temoder_register, temoder_value);
setbits16(temoder_register, TEMODER_TX_RMON_STATISTICS_ENABLE);
}
if (enable_rx_firmware_statistics) {
out_be32(rx_rmon_base_ptr,
rx_firmware_statistics_structure_address);
remoder_value = in_be32(remoder_register);
remoder_value |= REMODER_RX_RMON_STATISTICS_ENABLE;
out_be32(remoder_register, remoder_value);
setbits32(remoder_register, REMODER_RX_RMON_STATISTICS_ENABLE);
}
return 0;
@ -1316,15 +1296,12 @@ static int init_check_frame_length_mode(int length_check,
static int init_preamble_length(u8 preamble_length,
u32 __iomem *maccfg2_register)
{
u32 value = 0;
if ((preamble_length < 3) || (preamble_length > 7))
return -EINVAL;
value = in_be32(maccfg2_register);
value &= ~MACCFG2_PREL_MASK;
value |= (preamble_length << MACCFG2_PREL_SHIFT);
out_be32(maccfg2_register, value);
clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK,
preamble_length << MACCFG2_PREL_SHIFT);
return 0;
}
@ -1337,19 +1314,19 @@ static int init_rx_parameters(int reject_broadcast,
value = in_be32(upsmr_register);
if (reject_broadcast)
value |= UPSMR_BRO;
value |= UCC_GETH_UPSMR_BRO;
else
value &= ~UPSMR_BRO;
value &= ~UCC_GETH_UPSMR_BRO;
if (receive_short_frames)
value |= UPSMR_RSH;
value |= UCC_GETH_UPSMR_RSH;
else
value &= ~UPSMR_RSH;
value &= ~UCC_GETH_UPSMR_RSH;
if (promiscuous)
value |= UPSMR_PRO;
value |= UCC_GETH_UPSMR_PRO;
else
value &= ~UPSMR_PRO;
value &= ~UCC_GETH_UPSMR_PRO;
out_be32(upsmr_register, value);
@ -1410,26 +1387,27 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
/* Set UPSMR */
upsmr = in_be32(&uf_regs->upsmr);
upsmr &= ~(UPSMR_RPM | UPSMR_R10M | UPSMR_TBIM | UPSMR_RMM);
upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
upsmr |= UPSMR_RPM;
upsmr |= UCC_GETH_UPSMR_RPM;
switch (ugeth->max_speed) {
case SPEED_10:
upsmr |= UPSMR_R10M;
upsmr |= UCC_GETH_UPSMR_R10M;
/* FALLTHROUGH */
case SPEED_100:
if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
upsmr |= UPSMR_RMM;
upsmr |= UCC_GETH_UPSMR_RMM;
}
}
if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
upsmr |= UPSMR_TBIM;
upsmr |= UCC_GETH_UPSMR_TBIM;
}
out_be32(&uf_regs->upsmr, upsmr);
@ -1517,9 +1495,9 @@ static void adjust_link(struct net_device *dev)
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
if (phydev->speed == SPEED_10)
upsmr |= UPSMR_R10M;
upsmr |= UCC_GETH_UPSMR_R10M;
else
upsmr &= ~(UPSMR_R10M);
upsmr &= ~UCC_GETH_UPSMR_R10M;
}
break;
default:
@ -1602,10 +1580,8 @@ static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
uccf = ugeth->uccf;
/* Mask GRACEFUL STOP TX interrupt bit and clear it */
temp = in_be32(uccf->p_uccm);
temp &= ~UCCE_GRA;
out_be32(uccf->p_uccm, temp);
out_be32(uccf->p_ucce, UCCE_GRA); /* clear by writing 1 */
clrbits32(uccf->p_uccm, UCC_GETH_UCCE_GRA);
out_be32(uccf->p_ucce, UCC_GETH_UCCE_GRA); /* clear by writing 1 */
/* Issue host command */
cecr_subblock =
@ -1617,7 +1593,7 @@ static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
do {
msleep(10);
temp = in_be32(uccf->p_ucce);
} while (!(temp & UCCE_GRA) && --i);
} while (!(temp & UCC_GETH_UCCE_GRA) && --i);
uccf->stopped_tx = 1;
@ -1975,12 +1951,9 @@ static void ucc_geth_set_multi(struct net_device *dev)
uf_regs = ugeth->uccf->uf_regs;
if (dev->flags & IFF_PROMISC) {
out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr) | UPSMR_PRO);
setbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);
} else {
out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr)&~UPSMR_PRO);
clrbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);
p_82xx_addr_filt =
(struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
@ -2020,7 +1993,6 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
{
struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
struct phy_device *phydev = ugeth->phydev;
u32 tempval;
ugeth_vdbg("%s: IN", __func__);
@ -2037,9 +2009,7 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
out_be32(ugeth->uccf->p_ucce, 0xffffffff);
/* Disable Rx and Tx */
tempval = in_be32(&ug_regs->maccfg1);
tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
out_be32(&ug_regs->maccfg1, tempval);
clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
ucc_geth_memclean(ugeth);
}
@ -2153,10 +2123,10 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
/* Generate uccm_mask for receive */
uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
for (i = 0; i < ug_info->numQueuesRx; i++)
uf_info->uccm_mask |= (UCCE_RXBF_SINGLE_MASK << i);
uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i);
for (i = 0; i < ug_info->numQueuesTx; i++)
uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i);
uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i);
/* Initialize the general fast UCC block. */
if (ucc_fast_init(uf_info, &ugeth->uccf)) {
if (netif_msg_probe(ugeth))
@ -2185,7 +2155,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
struct ucc_geth __iomem *ug_regs;
int ret_val = -EINVAL;
u32 remoder = UCC_GETH_REMODER_INIT;
u32 init_enet_pram_offset, cecr_subblock, command, maccfg1;
u32 init_enet_pram_offset, cecr_subblock, command;
u32 ifstat, i, j, size, l2qt, l3qt, length;
u16 temoder = UCC_GETH_TEMODER_INIT;
u16 test;
@ -2281,10 +2251,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
&uf_regs->upsmr,
&ug_regs->uempr, &ug_regs->maccfg1);
maccfg1 = in_be32(&ug_regs->maccfg1);
maccfg1 |= MACCFG1_ENABLE_RX;
maccfg1 |= MACCFG1_ENABLE_TX;
out_be32(&ug_regs->maccfg1, maccfg1);
setbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
/* Set IPGIFG */
/* For more details see the hardware spec. */
@ -3274,7 +3241,6 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
static int ucc_geth_poll(struct napi_struct *napi, int budget)
{
struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi);
struct net_device *dev = ugeth->dev;
struct ucc_geth_info *ug_info;
int howmany, i;
@ -3285,14 +3251,8 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget)
howmany += ucc_geth_rx(ugeth, i, budget - howmany);
if (howmany < budget) {
struct ucc_fast_private *uccf;
u32 uccm;
netif_rx_complete(napi);
uccf = ugeth->uccf;
uccm = in_be32(uccf->p_uccm);
uccm |= UCCE_RX_EVENTS;
out_be32(uccf->p_uccm, uccm);
setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS);
}
return howmany;
@ -3332,7 +3292,7 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
/* Tx event processing */
if (ucce & UCCE_TX_EVENTS) {
spin_lock(&ugeth->lock);
tx_mask = UCCE_TXBF_SINGLE_MASK;
tx_mask = UCC_GETH_UCCE_TXB0;
for (i = 0; i < ug_info->numQueuesTx; i++) {
if (ucce & tx_mask)
ucc_geth_tx(dev, i);
@ -3344,12 +3304,10 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
/* Errors and other events */
if (ucce & UCCE_OTHER) {
if (ucce & UCCE_BSY) {
if (ucce & UCC_GETH_UCCE_BSY)
dev->stats.rx_errors++;
}
if (ucce & UCCE_TXE) {
if (ucce & UCC_GETH_UCCE_TXE)
dev->stats.tx_errors++;
}
}
return IRQ_HANDLED;

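The ucc_geth conversion above leans on the powerpc bitwise I/O accessors. Their semantics match the open-coded read-modify-write sequences they replace; sketched here with hypothetical my_* names so as not to shadow the real helpers in the powerpc I/O headers:

	static inline void my_setbits32(u32 __iomem *addr, u32 set)
	{
		out_be32(addr, in_be32(addr) | set);
	}

	static inline void my_clrbits32(u32 __iomem *addr, u32 clear)
	{
		out_be32(addr, in_be32(addr) & ~clear);
	}

	static inline void my_clrsetbits_be32(u32 __iomem *addr, u32 clear, u32 set)
	{
		out_be32(addr, (in_be32(addr) & ~clear) | set);
	}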

@ -162,92 +162,27 @@ struct ucc_geth {
boundary */
/* UCC GETH Event Register */
#define UCCE_MPD 0x80000000 /* Magic packet
detection */
#define UCCE_SCAR 0x40000000
#define UCCE_GRA 0x20000000 /* Tx graceful
stop
complete */
#define UCCE_CBPR 0x10000000
#define UCCE_BSY 0x08000000
#define UCCE_RXC 0x04000000
#define UCCE_TXC 0x02000000
#define UCCE_TXE 0x01000000
#define UCCE_TXB7 0x00800000
#define UCCE_TXB6 0x00400000
#define UCCE_TXB5 0x00200000
#define UCCE_TXB4 0x00100000
#define UCCE_TXB3 0x00080000
#define UCCE_TXB2 0x00040000
#define UCCE_TXB1 0x00020000
#define UCCE_TXB0 0x00010000
#define UCCE_RXB7 0x00008000
#define UCCE_RXB6 0x00004000
#define UCCE_RXB5 0x00002000
#define UCCE_RXB4 0x00001000
#define UCCE_RXB3 0x00000800
#define UCCE_RXB2 0x00000400
#define UCCE_RXB1 0x00000200
#define UCCE_RXB0 0x00000100
#define UCCE_RXF7 0x00000080
#define UCCE_RXF6 0x00000040
#define UCCE_RXF5 0x00000020
#define UCCE_RXF4 0x00000010
#define UCCE_RXF3 0x00000008
#define UCCE_RXF2 0x00000004
#define UCCE_RXF1 0x00000002
#define UCCE_RXF0 0x00000001
#define UCCE_TXB (UCC_GETH_UCCE_TXB7 | UCC_GETH_UCCE_TXB6 | \
UCC_GETH_UCCE_TXB5 | UCC_GETH_UCCE_TXB4 | \
UCC_GETH_UCCE_TXB3 | UCC_GETH_UCCE_TXB2 | \
UCC_GETH_UCCE_TXB1 | UCC_GETH_UCCE_TXB0)
#define UCCE_RXBF_SINGLE_MASK (UCCE_RXF0)
#define UCCE_TXBF_SINGLE_MASK (UCCE_TXB0)
#define UCCE_RXB (UCC_GETH_UCCE_RXB7 | UCC_GETH_UCCE_RXB6 | \
UCC_GETH_UCCE_RXB5 | UCC_GETH_UCCE_RXB4 | \
UCC_GETH_UCCE_RXB3 | UCC_GETH_UCCE_RXB2 | \
UCC_GETH_UCCE_RXB1 | UCC_GETH_UCCE_RXB0)
#define UCCE_TXB (UCCE_TXB7 | UCCE_TXB6 | UCCE_TXB5 | UCCE_TXB4 |\
UCCE_TXB3 | UCCE_TXB2 | UCCE_TXB1 | UCCE_TXB0)
#define UCCE_RXB (UCCE_RXB7 | UCCE_RXB6 | UCCE_RXB5 | UCCE_RXB4 |\
UCCE_RXB3 | UCCE_RXB2 | UCCE_RXB1 | UCCE_RXB0)
#define UCCE_RXF (UCCE_RXF7 | UCCE_RXF6 | UCCE_RXF5 | UCCE_RXF4 |\
UCCE_RXF3 | UCCE_RXF2 | UCCE_RXF1 | UCCE_RXF0)
#define UCCE_OTHER (UCCE_SCAR | UCCE_GRA | UCCE_CBPR | UCCE_BSY |\
UCCE_RXC | UCCE_TXC | UCCE_TXE)
#define UCCE_RXF (UCC_GETH_UCCE_RXF7 | UCC_GETH_UCCE_RXF6 | \
UCC_GETH_UCCE_RXF5 | UCC_GETH_UCCE_RXF4 | \
UCC_GETH_UCCE_RXF3 | UCC_GETH_UCCE_RXF2 | \
UCC_GETH_UCCE_RXF1 | UCC_GETH_UCCE_RXF0)
#define UCCE_RX_EVENTS (UCCE_RXF | UCCE_BSY)
#define UCCE_TX_EVENTS (UCCE_TXB | UCCE_TXE)
#define UCCE_OTHER (UCC_GETH_UCCE_SCAR | UCC_GETH_UCCE_GRA | \
UCC_GETH_UCCE_CBPR | UCC_GETH_UCCE_BSY | \
UCC_GETH_UCCE_RXC | UCC_GETH_UCCE_TXC | UCC_GETH_UCCE_TXE)
/* UCC GETH UPSMR (Protocol Specific Mode Register) */
#define UPSMR_ECM	0x04000000 /* Enable CAM Miss or Enable Filtering Miss */
#define UPSMR_HSE	0x02000000 /* Hardware Statistics Enable */
#define UPSMR_PRO	0x00400000 /* Promiscuous */
#define UPSMR_CAP	0x00200000 /* CAM polarity */
#define UPSMR_RSH	0x00100000 /* Receive Short Frames */
#define UPSMR_RPM	0x00080000 /* Reduced Pin Mode interfaces */
#define UPSMR_R10M	0x00040000 /* RGMII/RMII 10 Mode */
#define UPSMR_RLPB	0x00020000 /* RMII Loopback Mode */
#define UPSMR_TBIM	0x00010000 /* Ten-bit Interface Mode */
#define UPSMR_RMM	0x00001000 /* RMII/RGMII Mode */
#define UPSMR_CAM	0x00000400 /* CAM Address Matching */
#define UPSMR_BRO	0x00000200 /* Broadcast Address */
#define UPSMR_RES1	0x00002000 /* Reserved field - must be 1 */
#define UCCE_RX_EVENTS (UCCE_RXF | UCC_GETH_UCCE_BSY)
#define UCCE_TX_EVENTS (UCCE_TXB | UCC_GETH_UCCE_TXE)
/* UCC GETH MACCFG1 (MAC Configuration 1 Register) */
#define MACCFG1_FLOW_RX 0x00000020 /* Flow Control
@ -945,9 +880,10 @@ struct ucc_geth_hardware_statistics {
#define UCC_GETH_REMODER_INIT	0	/* bits that must be set */
#define UCC_GETH_TEMODER_INIT	0xC000	/* bits that must */
#define UCC_GETH_UPSMR_INIT	(UPSMR_RES1)	/* Start value for this register */
/* Initial value for UPSMR */
#define UCC_GETH_UPSMR_INIT UCC_GETH_UPSMR_RES1
#define UCC_GETH_MACCFG1_INIT 0
#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1)

View File

@ -622,6 +622,7 @@ static const struct net_device_ops rhine_netdev_ops = {
.ndo_get_stats = rhine_get_stats,
.ndo_set_multicast_list = rhine_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_do_ioctl = netdev_ioctl,
.ndo_tx_timeout = rhine_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER

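Several drivers in this merge gain .ndo_set_mac_address = eth_mac_addr. For reference, the generic helper in net/ethernet/eth.c behaves roughly like the sketch below (a paraphrase, not the verbatim source): it refuses to change the address while the interface is running and rejects invalid unicast addresses.

static int eth_mac_addr_sketch(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	if (netif_running(dev))
		return -EBUSY;			/* no live address changes */
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;		/* zero/multicast rejected */
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	return 0;
}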
View File

@ -855,6 +855,7 @@ static const struct net_device_ops velocity_netdev_ops = {
.ndo_start_xmit = velocity_xmit,
.ndo_get_stats = velocity_get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_multicast_list = velocity_set_multi,
.ndo_change_mtu = velocity_change_mtu,
.ndo_do_ioctl = velocity_ioctl,

View File

@ -155,6 +155,7 @@ static const struct net_device_ops wd_netdev_ops = {
.ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll,

View File

@ -1369,7 +1369,7 @@ EXPORT_SYMBOL_GPL(lbs_start_card);
void lbs_stop_card(struct lbs_private *priv)
{
struct net_device *dev = priv->dev;
struct net_device *dev;
struct cmd_ctrl_node *cmdnode;
unsigned long flags;
@ -1377,9 +1377,10 @@ void lbs_stop_card(struct lbs_private *priv)
if (!priv)
goto out;
dev = priv->dev;
netif_stop_queue(priv->dev);
netif_carrier_off(priv->dev);
netif_stop_queue(dev);
netif_carrier_off(dev);
lbs_debugfs_remove_one(priv);
if (priv->mesh_tlv) {

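The fix above is the classic use-before-check bug: the old declaration dereferenced priv in its initializer, before the !priv guard ran. The safe ordering, reduced to a skeleton (names illustrative):

void example_stop(struct example_priv *priv)
{
	struct net_device *dev;	/* no initializer that touches priv */

	if (!priv)
		return;
	dev = priv->dev;	/* only dereference after the guard */
	netif_stop_queue(dev);
	netif_carrier_off(dev);
}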
View File

@ -362,6 +362,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_set_multicast_list = set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_do_ioctl = netdev_ioctl,
.ndo_tx_timeout = yellowfin_tx_timeout,
};

View File

@ -147,6 +147,7 @@ static const struct net_device_ops zorro8390_netdev_ops = {
.ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll,

View File

@ -916,7 +916,7 @@ static struct ethtool_ops qeth_l2_osn_ops = {
.get_drvinfo = qeth_core_get_drvinfo,
};
static struct net_device_ops qeth_l2_netdev_ops = {
static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_open = qeth_l2_open,
.ndo_stop = qeth_l2_stop,
.ndo_get_stats = qeth_get_stats,

View File

@ -2894,7 +2894,7 @@ qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np)
return 0;
}
static struct net_device_ops qeth_l3_netdev_ops = {
static const struct net_device_ops qeth_l3_netdev_ops = {
.ndo_open = qeth_l3_open,
.ndo_stop = qeth_l3_stop,
.ndo_get_stats = qeth_get_stats,
@ -2909,6 +2909,22 @@ static struct net_device_ops qeth_l3_netdev_ops = {
.ndo_tx_timeout = qeth_tx_timeout,
};
static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_open = qeth_l3_open,
.ndo_stop = qeth_l3_stop,
.ndo_get_stats = qeth_get_stats,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_multicast_list = qeth_l3_set_multicast_list,
.ndo_do_ioctl = qeth_l3_do_ioctl,
.ndo_change_mtu = qeth_change_mtu,
.ndo_vlan_rx_register = qeth_l3_vlan_rx_register,
.ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
.ndo_tx_timeout = qeth_tx_timeout,
.ndo_neigh_setup = qeth_l3_neigh_setup,
};
static int qeth_l3_setup_netdev(struct qeth_card *card)
{
if (card->info.type == QETH_CARD_TYPE_OSAE) {
@ -2919,12 +2935,12 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
#endif
if (!card->dev)
return -ENODEV;
card->dev->netdev_ops = &qeth_l3_netdev_ops;
} else {
card->dev = alloc_etherdev(0);
if (!card->dev)
return -ENODEV;
qeth_l3_netdev_ops.ndo_neigh_setup =
qeth_l3_neigh_setup;
card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
/*IPv6 address autoconfiguration stuff*/
qeth_l3_get_unique_id(card);
@ -2937,6 +2953,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
if (!card->dev)
return -ENODEV;
card->dev->flags |= IFF_NOARP;
card->dev->netdev_ops = &qeth_l3_netdev_ops;
qeth_l3_iqd_read_initial_mac(card);
} else
return -ENODEV;
@ -2944,7 +2961,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
card->dev->ml_priv = card;
card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
card->dev->mtu = card->info.initial_mtu;
card->dev->netdev_ops = &qeth_l3_netdev_ops;
SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops);
card->dev->features |= NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX |

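The point of the second ops table above: qeth_l3_netdev_ops could not be made const while probe code patched .ndo_neigh_setup into it at runtime. Duplicating the table with the extra hook lets both stay const. The pattern in miniature, with illustrative names:

static const struct net_device_ops example_ops = {
	.ndo_open = example_open,
};

static const struct net_device_ops example_osa_ops = {
	.ndo_open        = example_open,
	.ndo_neigh_setup = example_neigh_setup,	/* variant-only hook */
};

static void example_setup_netdev(struct example_card *card, bool osa)
{
	/* pick a table instead of writing into a shared static one */
	card->dev->netdev_ops = osa ? &example_osa_ops : &example_ops;
}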
View File

@ -270,8 +270,18 @@ struct dma_device {
/* --- public DMA engine API --- */
#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif
dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,

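The header change uses the standard kernel idiom for optional subsystems: real prototypes under the config option, empty static inlines otherwise, so callers such as net/core/dev.c need no #ifdef of their own. The skeleton of the idiom, with hypothetical names:

#ifdef CONFIG_SOME_FEATURE
void feature_get(void);
void feature_put(void);
#else
static inline void feature_get(void) { }	/* compiles away */
static inline void feature_put(void) { }
#endif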
View File

@ -323,8 +323,8 @@ struct input_dev;
*
* @rf_hw: [private] State of the hardware radio switch (OFF/ON)
*
* @debufs_dentry: [private] Used to hook up a debugfs entry. This
* shows up in the debugfs root as wimax:DEVICENAME.
* @debugfs_dentry: [private] Used to hook up a debugfs entry. This
* shows up in the debugfs root as wimax\:DEVICENAME.
*
* Description:
* This structure defines a common interface to access all WiMAX

View File

@ -85,12 +85,13 @@ static inline int ebt_do_match (struct ebt_entry_match *m,
static inline int ebt_dev_check(char *entry, const struct net_device *device)
{
int i = 0;
const char *devname = device->name;
const char *devname;
if (*entry == '\0')
return 0;
if (!device)
return 1;
devname = device->name;
/* 1 is the wildcard token */
while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
i++;

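Besides moving the device->name dereference behind the NULL check, the loop above implements suffix-wildcard matching, with byte 0x01 as the wildcard token. A small userspace re-implementation to show the semantics (my reading of the loop, inverted to return 1 on match; not kernel code):

#include <stdio.h>

/* Returns 1 when the pattern matches the device name; byte 0x01 in
 * the pattern means "any suffix from here on". */
static int name_matches(const char *entry, const char *devname)
{
	int i = 0;

	while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
		i++;
	return entry[i] == 1 || entry[i] == devname[i];
}

int main(void)
{
	printf("%d\n", name_matches("eth\1", "eth0"));	/* 1: wildcard hit */
	printf("%d\n", name_matches("eth0", "eth1"));	/* 0: mismatch */
	return 0;
}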
View File

@ -1087,6 +1087,11 @@ int dev_open(struct net_device *dev)
*/
dev->flags |= IFF_UP;
/*
* Enable NET_DMA
*/
dmaengine_get();
/*
* Initialize multicasting status
*/
@ -1164,6 +1169,11 @@ int dev_close(struct net_device *dev)
*/
call_netdevice_notifiers(NETDEV_DOWN, dev);
/*
* Shutdown NET_DMA
*/
dmaengine_put();
return 0;
}
@ -5151,9 +5161,6 @@ static int __init net_dev_init(void)
hotcpu_notifier(dev_cpu_callback, 0);
dst_init();
dev_mcast_init();
#ifdef CONFIG_NET_DMA
dmaengine_get();
#endif
rc = 0;
out:
return rc;

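Taken together, the dev.c hunks move the dmaengine reference from boot-time net_dev_init() into the ifup/ifdown path, so NET_DMA only pins DMA channels while at least one interface is up. The pairing, reduced to its skeleton (illustrative, not the full dev_open/dev_close):

int example_open(struct net_device *dev)
{
	dev->flags |= IFF_UP;
	dmaengine_get();	/* pin DMA channels for receive offload */
	return 0;
}

int example_close(struct net_device *dev)
{
	dev->flags &= ~IFF_UP;
	dmaengine_put();	/* balanced with the get in open */
	return 0;
}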
View File

@ -29,7 +29,7 @@ config IP_DCCP_CCID3
http://www.ietf.org/rfc/rfc4342.txt
The TFRC congestion control algorithms were initially described in
RFC 5448.
RFC 5348.
This text was extracted from RFC 4340 (sec. 10.2),
http://www.ietf.org/rfc/rfc4340.txt

View File

@ -36,7 +36,7 @@ out:
return rc;
}
void __exit tfrc_lib_exit(void)
void tfrc_lib_exit(void)
{
tfrc_rx_packet_history_exit();
tfrc_tx_packet_history_exit();

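Dropping __exit here matters because, as I read it, tfrc_lib_exit() is also reachable from init code, and a function placed in the exit section may be discarded (e.g. when built in), so modpost flags such a reference as a section mismatch. A minimal reproduction of the offending shape, with hypothetical names:

static void __exit example_lib_exit(void)
{
	/* teardown */
}

static int __init example_lib_init(void)
{
	if (1 /* some setup step failed */) {
		example_lib_exit();	/* modpost: section mismatch */
		return -ENOMEM;
	}
	return 0;
}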
View File

@ -124,7 +124,7 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_m
static inline void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b,
struct tipc_node_map *nm_diff)
{
int stop = sizeof(nm_a->map) / sizeof(u32);
int stop = ARRAY_SIZE(nm_a->map);
int w;
int b;
u32 map;

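ARRAY_SIZE() replaces the open-coded sizeof division above; besides reading better, it derives the element size from the array itself instead of hard-coding u32, and the kernel macro adds a compile-time check that its argument is a real array rather than a pointer. What it boils down to:

#include <stddef.h>

/* The core of ARRAY_SIZE(), minus the kernel's array-type check. */
#define EXAMPLE_ARRAY_SIZE(arr)	(sizeof(arr) / sizeof((arr)[0]))

static unsigned int map[4];
static int stop = EXAMPLE_ARRAY_SIZE(map);	/* 4, whatever the element type */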
View File

@ -63,7 +63,6 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
if (len > skb_tailroom(skb))
len = skb_tailroom(skb);
skb->truesize += len;
__skb_put(skb, len);
len += plen;