Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (67 commits)
  cxgb4vf: recover from failure in cxgb4vf_open()
  netfilter: ebtables: make broute table work again
  netfilter: fix race in conntrack between dump_table and destroy
  ah: reload pointers to skb data after calling skb_cow_data()
  ah: update maximum truncated ICV length
  xfrm: check trunc_len in XFRMA_ALG_AUTH_TRUNC
  ehea: Increase the skb array usage
  net/fec: remove config FEC2 as it's used nowhere
  pcnet_cs: add new_id
  tcp: disallow bind() to reuse addr/port
  net/r8169: Update the function of parsing firmware
  net: ppp: use {get,put}_unaligned_be{16,32}
  CAIF: Fix IPv6 support in receive path for GPRS/3G
  arp: allow to invalidate specific ARP entries
  net_sched: factorize qdisc stats handling
  mlx4: Call alloc_etherdev to allocate RX and TX queues
  net: Add alloc_netdev_mqs function
  caif: don't set connection request param size before copying data
  cxgb4vf: fix mailbox data/control coherency domain race
  qlcnic: change module parameter permissions
  ...
Linus Torvalds committed 2011-01-11 16:32:41 -08:00
commit 4162cf6497
109 changed files with 2905 additions and 1871 deletions


@@ -167,6 +167,7 @@ rx_ccid = 2
 seq_window = 100
     The initial sequence window (sec. 7.5.2) of the sender. This influences
     the local ackno validity and the remote seqno validity windows (7.5.1).
+    Values in the range Wmin = 32 (RFC 4340, 7.5.2) up to 2^32-1 can be set.
 
 tx_qlen = 5
     The size of the transmit buffer in packets. A value of 0 corresponds


@@ -1926,8 +1926,9 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
   const struct firmware *fw;
   unsigned long start_address;
   const struct ihex_binrec *rec;
+  const char *errmsg = 0;
   int res;
 
   res = request_ihex_firmware(&fw, "atmsar11.fw", &dev->pci_dev->dev);
   if (res) {
     PRINTK (KERN_ERR, "Cannot load microcode data");
@@ -1937,8 +1938,8 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
   /* First record contains just the start address */
   rec = (const struct ihex_binrec *)fw->data;
   if (be16_to_cpu(rec->len) != sizeof(__be32) || be32_to_cpu(rec->addr)) {
-    PRINTK (KERN_ERR, "Bad microcode data (no start record)");
-    return -EINVAL;
+    errmsg = "no start record";
+    goto fail;
   }
 
   start_address = be32_to_cpup((__be32 *)rec->data);
@@ -1950,12 +1951,12 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
     PRINTD (DBG_LOAD, "starting region (%x, %u)", be32_to_cpu(rec->addr),
             be16_to_cpu(rec->len));
     if (be16_to_cpu(rec->len) > 4 * MAX_TRANSFER_DATA) {
-      PRINTK (KERN_ERR, "Bad microcode data (record too long)");
-      return -EINVAL;
+      errmsg = "record too long";
+      goto fail;
     }
     if (be16_to_cpu(rec->len) & 3) {
-      PRINTK (KERN_ERR, "Bad microcode data (odd number of bytes)");
-      return -EINVAL;
+      errmsg = "odd number of bytes";
+      goto fail;
     }
     res = loader_write(lb, dev, rec);
     if (res)
@@ -1970,6 +1971,10 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
 
   res = loader_start(lb, dev, start_address);
   return res;
+fail:
+  release_firmware(fw);
+  PRINTK(KERN_ERR, "Bad microcode data (%s)", errmsg);
+  return -EINVAL;
 }
 
 /********** give adapter parameters **********/
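This hunk replaces three copies of "log and return -EINVAL" with a single fail: exit that also calls release_firmware(), so the firmware reference is no longer leaked on the later error paths. A minimal userspace sketch of the same centralized-cleanup idiom (struct blob and the helpers are invented for illustration, not driver code):

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the firmware image held by ucode_init(). */
struct blob {
    unsigned char *data;
    size_t len;
};

/* Validate and consume a blob.  Every failure branches to one cleanup
 * point that releases the resource and reports, as ucode_init() now does. */
static int parse_blob(struct blob *b)
{
    const char *errmsg = NULL;

    if (b->len < 4) {
        errmsg = "no start record";
        goto fail;
    }
    if (b->len & 3) {
        errmsg = "odd number of bytes";
        goto fail;
    }
    free(b->data);      /* normal path releases it too */
    return 0;

fail:
    free(b->data);      /* single release point, cf. release_firmware(fw) */
    fprintf(stderr, "bad data (%s)\n", errmsg);
    return -1;
}

int main(void)
{
    struct blob b = { .data = NULL, .len = 7 };  /* 7 & 3 != 0: takes the fail path */

    b.data = calloc(1, b.len);
    if (!b.data)
        return 1;
    return parse_blob(&b) ? 1 : 0;
}
```

The point of the pattern is that every resource is released in exactly one place, so adding a new validity check cannot reintroduce a leak.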


@@ -1944,19 +1944,12 @@ config 68360_ENET
 config FEC
     bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
     depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
-        MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5
+        MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 || SOC_IMX28
     select PHYLIB
     help
       Say Y here if you want to use the built-in 10/100 Fast ethernet
       controller on some Motorola ColdFire and Freescale i.MX processors.
 
-config FEC2
-    bool "Second FEC ethernet controller (on some ColdFire CPUs)"
-    depends on FEC
-    help
-      Say Y here if you want to use the second built-in 10/100 Fast
-      ethernet controller on some Motorola ColdFire processors.
-
 config FEC_MPC52xx
     tristate "MPC52xx FEC driver"
     depends on PPC_MPC52xx && PPC_BESTCOMM


@@ -8,6 +8,11 @@
  * Licensed under the GPL-2 or later.
  */
 
+#define DRV_VERSION "1.1"
+#define DRV_DESC    "Blackfin on-chip Ethernet MAC driver"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -41,12 +46,7 @@
 #include "bfin_mac.h"
 
-#define DRV_NAME    "bfin_mac"
-#define DRV_VERSION "1.1"
-#define DRV_AUTHOR  "Bryan Wu, Luke Yang"
-#define DRV_DESC    "Blackfin on-chip Ethernet MAC driver"
-
-MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_AUTHOR("Bryan Wu, Luke Yang");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION(DRV_DESC);
 MODULE_ALIAS("platform:bfin_mac");
@@ -189,8 +189,7 @@ static int desc_list_init(void)
         /* allocate a new skb for next time receive */
         new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
         if (!new_skb) {
-            printk(KERN_NOTICE DRV_NAME
-                   ": init: low on mem - packet dropped\n");
+            pr_notice("init: low on mem - packet dropped\n");
             goto init_error;
         }
         skb_reserve(new_skb, NET_IP_ALIGN);
@@ -240,7 +239,7 @@ static int desc_list_init(void)
 
 init_error:
     desc_list_free();
-    printk(KERN_ERR DRV_NAME ": kmalloc failed\n");
+    pr_err("kmalloc failed\n");
     return -ENOMEM;
 }
 
@@ -259,8 +258,7 @@ static int bfin_mdio_poll(void)
     while ((bfin_read_EMAC_STAADD()) & STABUSY) {
         udelay(1);
         if (timeout_cnt-- < 0) {
-            printk(KERN_ERR DRV_NAME
-                   ": wait MDC/MDIO transaction to complete timeout\n");
+            pr_err("wait MDC/MDIO transaction to complete timeout\n");
             return -ETIMEDOUT;
         }
     }
@@ -350,9 +348,9 @@ static void bfin_mac_adjust_link(struct net_device *dev)
                 opmode &= ~RMII_10;
                 break;
             default:
-                printk(KERN_WARNING
-                       "%s: Ack! Speed (%d) is not 10/100!\n",
-                       DRV_NAME, phydev->speed);
+                netdev_warn(dev,
+                            "Ack! Speed (%d) is not 10/100!\n",
+                            phydev->speed);
                 break;
             }
             bfin_write_EMAC_OPMODE(opmode);
@@ -417,14 +415,13 @@ static int mii_probe(struct net_device *dev, int phy_mode)
     /* now we are supposed to have a proper phydev, to attach to... */
     if (!phydev) {
-        printk(KERN_INFO "%s: Don't found any phy device at all\n",
-               dev->name);
+        netdev_err(dev, "no phy device found\n");
         return -ENODEV;
     }
 
     if (phy_mode != PHY_INTERFACE_MODE_RMII &&
         phy_mode != PHY_INTERFACE_MODE_MII) {
-        printk(KERN_INFO "%s: Invalid phy interface mode\n", dev->name);
+        netdev_err(dev, "invalid phy interface mode\n");
         return -EINVAL;
     }
 
@@ -432,7 +429,7 @@ static int mii_probe(struct net_device *dev, int phy_mode)
             0, phy_mode);
 
     if (IS_ERR(phydev)) {
-        printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+        netdev_err(dev, "could not attach PHY\n");
         return PTR_ERR(phydev);
     }
 
@@ -453,11 +450,10 @@ static int mii_probe(struct net_device *dev, int phy_mode)
     lp->old_duplex = -1;
     lp->phydev = phydev;
 
-    printk(KERN_INFO "%s: attached PHY driver [%s] "
-           "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)"
-           "@sclk=%dMHz)\n",
-           DRV_NAME, phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
-           MDC_CLK, mdc_div, sclk/1000000);
+    pr_info("attached PHY driver [%s] "
+        "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n",
+        phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
+        MDC_CLK, mdc_div, sclk/1000000);
 
     return 0;
 }
@@ -502,7 +498,7 @@ bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
 static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
                     struct ethtool_drvinfo *info)
 {
-    strcpy(info->driver, DRV_NAME);
+    strcpy(info->driver, KBUILD_MODNAME);
     strcpy(info->version, DRV_VERSION);
     strcpy(info->fw_version, "N/A");
     strcpy(info->bus_info, dev_name(&dev->dev));
@@ -562,7 +558,7 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = {
 };
 
 /**************************************************************************/
-void setup_system_regs(struct net_device *dev)
+static void setup_system_regs(struct net_device *dev)
 {
     struct bfin_mac_local *lp = netdev_priv(dev);
     int i;
@@ -592,6 +588,10 @@ void setup_system_regs(struct net_device *dev)
 
     bfin_write_EMAC_MMC_CTL(RSTC | CROLL);
 
+    /* Set vlan regs to let 1522 bytes long packets pass through */
+    bfin_write_EMAC_VLAN1(lp->vlan1_mask);
+    bfin_write_EMAC_VLAN2(lp->vlan2_mask);
+
     /* Initialize the TX DMA channel registers */
     bfin_write_DMA2_X_COUNT(0);
     bfin_write_DMA2_X_MODIFY(4);
@@ -827,8 +827,7 @@ static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
         while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
             udelay(1);
         if (timeout_cnt == 0)
-            printk(KERN_ERR DRV_NAME
-                    ": fails to timestamp the TX packet\n");
+            netdev_err(netdev, "timestamp the TX packet failed\n");
         else {
             struct skb_shared_hwtstamps shhwtstamps;
             u64 ns;
@@ -1083,8 +1082,7 @@ static void bfin_mac_rx(struct net_device *dev)
      * we which case we simply drop the packet
      */
     if (current_rx_ptr->status.status_word & RX_ERROR_MASK) {
-        printk(KERN_NOTICE DRV_NAME
-               ": rx: receive error - packet dropped\n");
+        netdev_notice(dev, "rx: receive error - packet dropped\n");
         dev->stats.rx_dropped++;
         goto out;
     }
@@ -1094,8 +1092,7 @@ static void bfin_mac_rx(struct net_device *dev)
     new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
     if (!new_skb) {
-        printk(KERN_NOTICE DRV_NAME
-               ": rx: low on mem - packet dropped\n");
+        netdev_notice(dev, "rx: low on mem - packet dropped\n");
         dev->stats.rx_dropped++;
         goto out;
     }
@@ -1213,7 +1210,7 @@ static int bfin_mac_enable(struct phy_device *phydev)
     int ret;
     u32 opmode;
 
-    pr_debug("%s: %s\n", DRV_NAME, __func__);
+    pr_debug("%s\n", __func__);
 
     /* Set RX DMA */
     bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
@@ -1323,7 +1320,7 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
     u32 sysctl;
 
     if (dev->flags & IFF_PROMISC) {
-        printk(KERN_INFO "%s: set to promisc mode\n", dev->name);
+        netdev_info(dev, "set promisc mode\n");
         sysctl = bfin_read_EMAC_OPMODE();
         sysctl |= PR;
         bfin_write_EMAC_OPMODE(sysctl);
@@ -1393,7 +1390,7 @@ static int bfin_mac_open(struct net_device *dev)
      * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
      */
     if (!is_valid_ether_addr(dev->dev_addr)) {
-        printk(KERN_WARNING DRV_NAME ": no valid ethernet hw addr\n");
+        netdev_warn(dev, "no valid ethernet hw addr\n");
         return -EINVAL;
     }
 
@@ -1527,6 +1524,9 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
         goto out_err_mii_probe;
     }
 
+    lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask;
+    lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask;
+
     /* Fill in the fields of the device structure with ethernet values. */
     ether_setup(ndev);
 
@@ -1558,7 +1558,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
     bfin_mac_hwtstamp_init(ndev);
 
     /* now, print out the card info, in a short format.. */
-    dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
+    netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
 
     return 0;
 
@@ -1650,7 +1650,7 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
      * so set the GPIO pins to Ethernet mode
     */
     pin_req = mii_bus_pd->mac_peripherals;
-    rc = peripheral_request_list(pin_req, DRV_NAME);
+    rc = peripheral_request_list(pin_req, KBUILD_MODNAME);
     if (rc) {
         dev_err(&pdev->dev, "Requesting peripherals failed!\n");
         return rc;
@@ -1739,7 +1739,7 @@ static struct platform_driver bfin_mac_driver = {
     .resume = bfin_mac_resume,
     .suspend = bfin_mac_suspend,
     .driver = {
-        .name = DRV_NAME,
+        .name = KBUILD_MODNAME,
         .owner = THIS_MODULE,
     },
 };


@@ -17,7 +17,14 @@
 #include <linux/etherdevice.h>
 #include <linux/bfin_mac.h>
 
+/*
+ * Disable hardware checksum for bug #5600 if writeback cache is
+ * enabled. Otherwize, corrupted RX packet will be sent up stack
+ * without error mark.
+ */
+#ifndef CONFIG_BFIN_EXTMEM_WRITEBACK
 #define BFIN_MAC_CSUM_OFFLOAD
+#endif
 
 #define TX_RECLAIM_JIFFIES (HZ / 5)
@@ -68,7 +75,6 @@ struct bfin_mac_local {
      */
     struct net_device_stats stats;
 
-    unsigned char Mac[6];   /* MAC address of the board */
     spinlock_t lock;
 
     int wol;        /* Wake On Lan */
@@ -76,6 +82,9 @@ struct bfin_mac_local {
     struct timer_list tx_reclaim_timer;
     struct net_device *ndev;
 
+    /* Data for EMAC_VLAN1 regs */
+    u16 vlan1_mask, vlan2_mask;
+
     /* MII and PHY stuffs */
     int old_link;       /* used by bf537_adjust_link */
     int old_speed;


@@ -636,6 +636,7 @@ struct bnx2x_common {
 #define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0)
 #define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f)
+#define CHIP_PARITY_ENABLED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
 
     int flash_size;
 #define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */

File diff suppressed because it is too large.


@@ -24,6 +24,7 @@
 #include "bnx2x.h"
 #include "bnx2x_cmn.h"
 #include "bnx2x_dump.h"
+#include "bnx2x_init.h"
 
 /* Note: in the format strings below %s is replaced by the queue-name which is
  * either its index or 'fcoe' for the fcoe queue. Make sure the format string
@@ -472,7 +473,7 @@ static int bnx2x_get_regs_len(struct net_device *dev)
 {
     struct bnx2x *bp = netdev_priv(dev);
     int regdump_len = 0;
-    int i;
+    int i, j, k;
 
     if (CHIP_IS_E1(bp)) {
         for (i = 0; i < REGS_COUNT; i++)
@@ -502,6 +503,15 @@ static int bnx2x_get_regs_len(struct net_device *dev)
             if (IS_E2_ONLINE(wreg_addrs_e2[i].info))
                 regdump_len += wreg_addrs_e2[i].size *
                     (1 + wreg_addrs_e2[i].read_regs_count);
+
+        for (i = 0; i < PAGE_MODE_VALUES_E2; i++)
+            for (j = 0; j < PAGE_WRITE_REGS_E2; j++) {
+                for (k = 0; k < PAGE_READ_REGS_E2; k++)
+                    if (IS_E2_ONLINE(page_read_regs_e2[k].
+                             info))
+                        regdump_len +=
+                            page_read_regs_e2[k].size;
+            }
     }
     regdump_len *= 4;
     regdump_len += sizeof(struct dump_hdr);
@@ -539,6 +549,12 @@ static void bnx2x_get_regs(struct net_device *dev,
     if (!netif_running(bp->dev))
         return;
 
+    /* Disable parity attentions as long as following dump may
+     * cause false alarms by reading never written registers. We
+     * will re-enable parity attentions right after the dump.
+     */
+    bnx2x_disable_blocks_parity(bp);
+
     dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
     dump_hdr.dump_sign = dump_sign_all;
     dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
@@ -580,6 +596,10 @@ static void bnx2x_get_regs(struct net_device *dev,
 
         bnx2x_read_pages_regs_e2(bp, p);
     }
+
+    /* Re-enable parity attentions */
+    bnx2x_clear_blocks_parity(bp);
+    if (CHIP_PARITY_ENABLED(bp))
+        bnx2x_enable_blocks_parity(bp);
 }
 
 #define PHY_FW_VER_LEN 20
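The two hunks in bnx2x_get_regs() bracket the dump: parity attentions are masked before the register walk, which reads registers that may never have been written, and latched parity status is cleared and attentions re-enabled afterwards. A self-contained sketch of that mask / operate / clear / restore bracket (the toy register file and reg_rd()/reg_wr() helpers are invented; the driver's REG_RD/REG_WR hit real BAR space):

```c
#include <stdint.h>
#include <stdio.h>

/* Toy register file standing in for a device's register block. */
static uint32_t regs[4];
static uint32_t reg_rd(int r)             { return regs[r]; }
static void     reg_wr(int r, uint32_t v) { regs[r] = v; }

enum { PRTY_MASK = 0, PRTY_STS_CLR = 1, SOME_REG = 2, OTHER_REG = 3 };

static void dump_registers(void)
{
    /* Reading never-written registers here could latch bogus parity
     * errors if attentions were still enabled. */
    for (int r = 0; r < 4; r++)
        printf("reg[%d] = 0x%08x\n", r, (unsigned)reg_rd(r));
}

int main(void)
{
    uint32_t saved = reg_rd(PRTY_MASK);

    reg_wr(PRTY_MASK, 0xffffffff);  /* 1) mask (disable) parity attentions */
    dump_registers();               /* 2) run the noisy operation          */
    (void)reg_rd(PRTY_STS_CLR);     /* 3) read-to-clear any latched status */
    reg_wr(PRTY_MASK, saved);       /* 4) restore the previous enable mask */
    return 0;
}
```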


@@ -192,5 +192,225 @@ struct src_ent {
     u64 next;
 };
 
+/****************************************************************************
+ * Parity configuration
+ ****************************************************************************/
+#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2) \
+{ \
+    block##_REG_##block##_PRTY_MASK, \
+    block##_REG_##block##_PRTY_STS_CLR, \
+    en_mask, {m1, m1h, m2}, #block \
+}
+
+#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2) \
+{ \
+    block##_REG_##block##_PRTY_MASK_0, \
+    block##_REG_##block##_PRTY_STS_CLR_0, \
+    en_mask, {m1, m1h, m2}, #block"_0" \
+}
+
+#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2) \
+{ \
+    block##_REG_##block##_PRTY_MASK_1, \
+    block##_REG_##block##_PRTY_STS_CLR_1, \
+    en_mask, {m1, m1h, m2}, #block"_1" \
+}
+
+static const struct {
+    u32 mask_addr;
+    u32 sts_clr_addr;
+    u32 en_mask;        /* Mask to enable parity attentions */
+    struct {
+        u32 e1;         /* 57710 */
+        u32 e1h;        /* 57711 */
+        u32 e2;         /* 57712 */
+    } reg_mask;         /* Register mask (all valid bits) */
+    char name[7];       /* Block's longest name is 6 characters long
+                         * (name + suffix)
+                         */
+} bnx2x_blocks_parity_data[] = {
+    /* bit 19 masked */
+    /* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
+    /* bit 5,18,20-31 */
+    /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
+    /* bit 5 */
+    /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */
+    /* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
+    /* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */
+
+    /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
+     * want to handle "system kill" flow at the moment.
+     */
+    BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff),
+    BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff),
+    BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff),
+    BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
+    BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff),
+    BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1),
+    BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff),
+    BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3),
+    {GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
+        GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0,
+        {0xf, 0xf, 0xf}, "UPB"},
+    {GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
+        GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
+        {0xf, 0xf, 0xf}, "XPB"},
+    BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7),
+    BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f),
+    BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf),
+    BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1),
+    BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf),
+    BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf),
+    BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff),
+    BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff),
+    BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
+    BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff),
+    BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
+    BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+    BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f),
+    BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+    BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f),
+    BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+    BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f),
+    BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+    BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f),
+};
+
+/* [28] MCP Latched rom_parity
+ * [29] MCP Latched ump_rx_parity
+ * [30] MCP Latched ump_tx_parity
+ * [31] MCP Latched scpad_parity
+ */
+#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
+    (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+     AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+     AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+     AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+
+/* Below registers control the MCP parity attention output. When
+ * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
+ * enabled, when cleared - disabled.
+ */
+static const u32 mcp_attn_ctl_regs[] = {
+    MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
+    MISC_REG_AEU_ENABLE4_NIG_0,
+    MISC_REG_AEU_ENABLE4_PXP_0,
+    MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
+    MISC_REG_AEU_ENABLE4_NIG_1,
+    MISC_REG_AEU_ENABLE4_PXP_1
+};
+
+static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
+{
+    int i;
+    u32 reg_val;
+
+    for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
+        reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]);
+
+        if (enable)
+            reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
+        else
+            reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
+
+        REG_WR(bp, mcp_attn_ctl_regs[i], reg_val);
+    }
+}
+
+static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx)
+{
+    if (CHIP_IS_E1(bp))
+        return bnx2x_blocks_parity_data[idx].reg_mask.e1;
+    else if (CHIP_IS_E1H(bp))
+        return bnx2x_blocks_parity_data[idx].reg_mask.e1h;
+    else
+        return bnx2x_blocks_parity_data[idx].reg_mask.e2;
+}
+
+static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp)
+{
+    int i;
+
+    for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+        u32 dis_mask = bnx2x_parity_reg_mask(bp, i);
+
+        if (dis_mask) {
+            REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
+                   dis_mask);
+            DP(NETIF_MSG_HW, "Setting parity mask "
+                     "for %s to\t\t0x%x\n",
+                bnx2x_blocks_parity_data[i].name, dis_mask);
+        }
+    }
+
+    /* Disable MCP parity attentions */
+    bnx2x_set_mcp_parity(bp, false);
+}
+
+/**
+ * Clear the parity error status registers.
+ */
+static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
+{
+    int i;
+    u32 reg_val, mcp_aeu_bits =
+        AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
+        AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
+        AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |
+        AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY;
+
+    /* Clear SEM_FAST parities */
+    REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+    REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+    REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+    REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+
+    for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+        u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
+
+        if (reg_mask) {
+            reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i].
+                     sts_clr_addr);
+            if (reg_val & reg_mask)
+                DP(NETIF_MSG_HW,
+                    "Parity errors in %s: 0x%x\n",
+                    bnx2x_blocks_parity_data[i].name,
+                    reg_val & reg_mask);
+        }
+    }
+
+    /* Check if there were parity attentions in MCP */
+    reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP);
+    if (reg_val & mcp_aeu_bits)
+        DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n",
+           reg_val & mcp_aeu_bits);
+
+    /* Clear parity attentions in MCP:
+     * [7]  clears Latched rom_parity
+     * [8]  clears Latched ump_rx_parity
+     * [9]  clears Latched ump_tx_parity
+     * [10] clears Latched scpad_parity (both ports)
+     */
+    REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780);
+}
+
+static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp)
+{
+    int i;
+
+    for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+        u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
+
+        if (reg_mask)
+            REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
+                bnx2x_blocks_parity_data[i].en_mask & reg_mask);
+    }
+
+    /* Enable MCP parity attentions */
+    bnx2x_set_mcp_parity(bp, true);
+}
+
 #endif /* BNX2X_INIT_H */


@@ -3152,7 +3152,6 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
 #define LOAD_COUNTER_MASK  (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
 #define RESET_DONE_FLAG_MASK  (~LOAD_COUNTER_MASK)
 #define RESET_DONE_FLAG_SHIFT  LOAD_COUNTER_BITS
-#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
 
 /*
  * should be run under rtnl lock
@@ -3527,7 +3526,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
        try to handle this event */
     bnx2x_acquire_alr(bp);
 
-    if (bnx2x_chk_parity_attn(bp)) {
+    if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
         bp->recovery_state = BNX2X_RECOVERY_INIT;
         bnx2x_set_reset_in_progress(bp);
         schedule_delayed_work(&bp->reset_task, 0);
@@ -4754,7 +4753,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
     return 0; /* OK */
 }
 
-static void enable_blocks_attention(struct bnx2x *bp)
+static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
 {
     REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
     if (CHIP_IS_E2(bp))
@@ -4808,53 +4807,9 @@ static void enable_blocks_attention(struct bnx2x *bp)
     REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
     REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
 /*  REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
-    REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);     /* bit 3,4 masked */
+    REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);     /* bit 3,4 masked */
 }
 
-static const struct {
-    u32 addr;
-    u32 mask;
-} bnx2x_parity_mask[] = {
-    {PXP_REG_PXP_PRTY_MASK, 0x3ffffff},
-    {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
-    {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
-    {HC_REG_HC_PRTY_MASK, 0x7},
-    {MISC_REG_MISC_PRTY_MASK, 0x1},
-    {QM_REG_QM_PRTY_MASK, 0x0},
-    {DORQ_REG_DORQ_PRTY_MASK, 0x0},
-    {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
-    {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
-    {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
-    {CDU_REG_CDU_PRTY_MASK, 0x0},
-    {CFC_REG_CFC_PRTY_MASK, 0x0},
-    {DBG_REG_DBG_PRTY_MASK, 0x0},
-    {DMAE_REG_DMAE_PRTY_MASK, 0x0},
-    {BRB1_REG_BRB1_PRTY_MASK, 0x0},
-    {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
-    {TSDM_REG_TSDM_PRTY_MASK, 0x18}, /* bit 3,4 */
-    {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
-    {USDM_REG_USDM_PRTY_MASK, 0x38}, /* bit 3,4,5 */
-    {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
-    {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
-    {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
-    {USEM_REG_USEM_PRTY_MASK_0, 0x0},
-    {USEM_REG_USEM_PRTY_MASK_1, 0x0},
-    {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
-    {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
-    {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
-    {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
-};
-
-static void enable_blocks_parity(struct bnx2x *bp)
-{
-    int i;
-
-    for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
-        REG_WR(bp, bnx2x_parity_mask[i].addr,
-            bnx2x_parity_mask[i].mask);
-}
-
 static void bnx2x_reset_common(struct bnx2x *bp)
 {
     /* reset_common */
@@ -5350,9 +5305,9 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
     /* clear PXP2 attentions */
     REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
 
-    enable_blocks_attention(bp);
-    if (CHIP_PARITY_SUPPORTED(bp))
-        enable_blocks_parity(bp);
+    bnx2x_enable_blocks_attention(bp);
+    if (CHIP_PARITY_ENABLED(bp))
+        bnx2x_enable_blocks_parity(bp);
 
     if (!BP_NOMCP(bp)) {
         /* In E2 2-PORT mode, same ext phy is used for the two paths */
@@ -8751,13 +8706,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
         dev_err(&bp->pdev->dev, "MCP disabled, "
                     "must load devices in order!\n");
 
-    /* Set multi queue mode */
-    if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
-        ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
-        dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
-                    "requested is not MSI-X\n");
-        multi_mode = ETH_RSS_MODE_DISABLED;
-    }
     bp->multi_mode = multi_mode;
     bp->int_mode = int_mode;
 
@@ -9560,9 +9508,15 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
     /* Delete all NAPI objects */
     bnx2x_del_all_napi(bp);
 
+    /* Power on: we can't let PCI layer write to us while we are in D3 */
+    bnx2x_set_power_state(bp, PCI_D0);
+
     /* Disable MSI/MSI-X */
     bnx2x_disable_msi(bp);
 
+    /* Power off */
+    bnx2x_set_power_state(bp, PCI_D3hot);
+
     /* Make sure RESET task is not scheduled before continuing */
     cancel_delayed_work_sync(&bp->reset_task);
 


@@ -18,6 +18,8 @@
  *    WR  - Write Clear (write 1 to clear the bit)
  *
  */
+#ifndef BNX2X_REG_H
+#define BNX2X_REG_H
 
 #define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
 #define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2)
@@ -39,6 +41,8 @@
 #define BRB1_REG_BRB1_PRTY_MASK 0x60138
 /* [R 4] Parity register #0 read */
 #define BRB1_REG_BRB1_PRTY_STS 0x6012c
+/* [RC 4] Parity register #0 read clear */
+#define BRB1_REG_BRB1_PRTY_STS_CLR 0x60130
 /* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
  * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
  * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning -
@@ -132,8 +136,12 @@
 #define CCM_REG_CCM_INT_MASK 0xd01e4
 /* [R 11] Interrupt register #0 read */
 #define CCM_REG_CCM_INT_STS 0xd01d8
+/* [RW 27] Parity mask register #0 read/write */
+#define CCM_REG_CCM_PRTY_MASK 0xd01f4
 /* [R 27] Parity register #0 read */
 #define CCM_REG_CCM_PRTY_STS 0xd01e8
+/* [RC 27] Parity register #0 read clear */
+#define CCM_REG_CCM_PRTY_STS_CLR 0xd01ec
 /* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
    REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
    Is used to determine the number of the AG context REG-pairs written back;
@@ -350,6 +358,8 @@
 #define CDU_REG_CDU_PRTY_MASK 0x10104c
 /* [R 5] Parity register #0 read */
 #define CDU_REG_CDU_PRTY_STS 0x101040
+/* [RC 5] Parity register #0 read clear */
+#define CDU_REG_CDU_PRTY_STS_CLR 0x101044
 /* [RC 32] logging of error data in case of a CDU load error:
    {expected_cid[15:0]; xpected_type[2:0]; xpected_region[2:0]; ctive_error;
    ype_error; ctual_active; ctual_compressed_context}; */
@@ -381,6 +391,8 @@
 #define CFC_REG_CFC_PRTY_MASK 0x104118
 /* [R 4] Parity register #0 read */
 #define CFC_REG_CFC_PRTY_STS 0x10410c
+/* [RC 4] Parity register #0 read clear */
+#define CFC_REG_CFC_PRTY_STS_CLR 0x104110
 /* [RW 21] CID cam access (21:1 - Data; alid - 0) */
 #define CFC_REG_CID_CAM 0x104800
 #define CFC_REG_CONTROL0 0x104028
@@ -466,6 +478,8 @@
 #define CSDM_REG_CSDM_PRTY_MASK 0xc22bc
 /* [R 11] Parity register #0 read */
 #define CSDM_REG_CSDM_PRTY_STS 0xc22b0
+/* [RC 11] Parity register #0 read clear */
+#define CSDM_REG_CSDM_PRTY_STS_CLR 0xc22b4
 #define CSDM_REG_ENABLE_IN1 0xc2238
 #define CSDM_REG_ENABLE_IN2 0xc223c
 #define CSDM_REG_ENABLE_OUT1 0xc2240
@@ -556,6 +570,9 @@
 /* [R 32] Parity register #0 read */
 #define CSEM_REG_CSEM_PRTY_STS_0 0x200124
 #define CSEM_REG_CSEM_PRTY_STS_1 0x200134
+/* [RC 32] Parity register #0 read clear */
+#define CSEM_REG_CSEM_PRTY_STS_CLR_0 0x200128
+#define CSEM_REG_CSEM_PRTY_STS_CLR_1 0x200138
 #define CSEM_REG_ENABLE_IN 0x2000a4
 #define CSEM_REG_ENABLE_OUT 0x2000a8
 /* [RW 32] This address space contains all registers and memories that are
@@ -648,6 +665,8 @@
 #define DBG_REG_DBG_PRTY_MASK 0xc0a8
 /* [R 1] Parity register #0 read */
 #define DBG_REG_DBG_PRTY_STS 0xc09c
+/* [RC 1] Parity register #0 read clear */
+#define DBG_REG_DBG_PRTY_STS_CLR 0xc0a0
 /* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The
  * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0;
  * 4.Completion function=0; 5.Error handling=0 */
@@ -668,6 +687,8 @@
 #define DMAE_REG_DMAE_PRTY_MASK 0x102064
 /* [R 4] Parity register #0 read */
 #define DMAE_REG_DMAE_PRTY_STS 0x102058
+/* [RC 4] Parity register #0 read clear */
+#define DMAE_REG_DMAE_PRTY_STS_CLR 0x10205c
 /* [RW 1] Command 0 go. */
 #define DMAE_REG_GO_C0 0x102080
 /* [RW 1] Command 1 go. */
@@ -734,6 +755,8 @@
 #define DORQ_REG_DORQ_PRTY_MASK 0x170190
 /* [R 2] Parity register #0 read */
 #define DORQ_REG_DORQ_PRTY_STS 0x170184
+/* [RC 2] Parity register #0 read clear */
+#define DORQ_REG_DORQ_PRTY_STS_CLR 0x170188
 /* [RW 8] The address to write the DPM CID to STORM. */
 #define DORQ_REG_DPM_CID_ADDR 0x170044
 /* [RW 5] The DPM mode CID extraction offset. */
@@ -842,8 +865,12 @@
 /* [R 1] data availble for error memory. If this bit is clear do not red
  * from error_handling_memory. */
 #define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130
+/* [RW 11] Parity mask register #0 read/write */
+#define IGU_REG_IGU_PRTY_MASK 0x1300a8
 /* [R 11] Parity register #0 read */
 #define IGU_REG_IGU_PRTY_STS 0x13009c
+/* [RC 11] Parity register #0 read clear */
+#define IGU_REG_IGU_PRTY_STS_CLR 0x1300a0
 /* [R 4] Debug: int_handle_fsm */
 #define IGU_REG_INT_HANDLE_FSM 0x130050
 #define IGU_REG_LEADING_EDGE_LATCH 0x130134
@@ -1501,6 +1528,8 @@
 #define MISC_REG_MISC_PRTY_MASK 0xa398
 /* [R 1] Parity register #0 read */
 #define MISC_REG_MISC_PRTY_STS 0xa38c
+/* [RC 1] Parity register #0 read clear */
+#define MISC_REG_MISC_PRTY_STS_CLR 0xa390
 #define MISC_REG_NIG_WOL_P0 0xa270
 #define MISC_REG_NIG_WOL_P1 0xa274
 /* [R 1] If set indicate that the pcie_rst_b was asserted without perst
@@ -2082,6 +2111,10 @@
 #define PBF_REG_PBF_INT_MASK 0x1401d4
 /* [R 5] Interrupt register #0 read */
 #define PBF_REG_PBF_INT_STS 0x1401c8
+/* [RW 20] Parity mask register #0 read/write */
+#define PBF_REG_PBF_PRTY_MASK 0x1401e4
+/* [RC 20] Parity register #0 read clear */
+#define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc
 #define PB_REG_CONTROL 0
 /* [RW 2] Interrupt mask register #0 read/write */
 #define PB_REG_PB_INT_MASK 0x28
@@ -2091,6 +2124,8 @@
 #define PB_REG_PB_PRTY_MASK 0x38
 /* [R 4] Parity register #0 read */
 #define PB_REG_PB_PRTY_STS 0x2c
+/* [RC 4] Parity register #0 read clear */
+#define PB_REG_PB_PRTY_STS_CLR 0x30
 #define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
 #define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8)
 #define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1)
@@ -2446,6 +2481,8 @@
 #define PRS_REG_PRS_PRTY_MASK 0x401a4
 /* [R 8] Parity register #0 read */
 #define PRS_REG_PRS_PRTY_STS 0x40198
+/* [RC 8] Parity register #0 read clear */
+#define PRS_REG_PRS_PRTY_STS_CLR 0x4019c
 /* [RW 8] Context region for pure acknowledge packets. Used in CFC load
    request message */
 #define PRS_REG_PURE_REGIONS 0x40024
@@ -2599,6 +2636,9 @@
 /* [R 32] Parity register #0 read */
 #define PXP2_REG_PXP2_PRTY_STS_0 0x12057c
 #define PXP2_REG_PXP2_PRTY_STS_1 0x12058c
+/* [RC 32] Parity register #0 read clear */
+#define PXP2_REG_PXP2_PRTY_STS_CLR_0 0x120580
+#define PXP2_REG_PXP2_PRTY_STS_CLR_1 0x120590
 /* [R 1] Debug only: The 'almost full' indication from each fifo (gives
    indication about backpressure) */
 #define PXP2_REG_RD_ALMOST_FULL_0 0x120424
@@ -3001,6 +3041,8 @@
 #define PXP_REG_PXP_PRTY_MASK 0x103094
 /* [R 26] Parity register #0 read */
 #define PXP_REG_PXP_PRTY_STS 0x103088
+/* [RC 27] Parity register #0 read clear */
+#define PXP_REG_PXP_PRTY_STS_CLR 0x10308c
 /* [RW 4] The activity counter initial increment value sent in the load
    request */
 #define QM_REG_ACTCTRINITVAL_0 0x168040
@@ -3157,6 +3199,8 @@
 #define QM_REG_QM_PRTY_MASK 0x168454
 /* [R 12] Parity register #0 read */
 #define QM_REG_QM_PRTY_STS 0x168448
+/* [RC 12] Parity register #0 read clear */
+#define QM_REG_QM_PRTY_STS_CLR 0x16844c
 /* [R 32] Current queues in pipeline: Queues from 32 to 63 */
 #define QM_REG_QSTATUS_HIGH 0x16802c
 /* [R 32] Current queues in pipeline: Queues from 96 to 127 */
@@ -3442,6 +3486,8 @@
 #define QM_REG_WRRWEIGHTS_9 0x168848
 /* [R 6] Keep the fill level of the fifo from write client 1 */
 #define QM_REG_XQM_WRC_FIFOLVL 0x168000
+/* [W 1] reset to parity interrupt */
+#define SEM_FAST_REG_PARITY_RST 0x18840
 #define SRC_REG_COUNTFREE0 0x40500
 /* [RW 1] If clr the searcher is compatible to E1 A0 - support only two
    ports. If set the searcher support 8 functions. */
@@ -3470,6 +3516,8 @@
 #define SRC_REG_SRC_PRTY_MASK 0x404c8
 /* [R 3] Parity register #0 read */
 #define SRC_REG_SRC_PRTY_STS 0x404bc
+/* [RC 3] Parity register #0 read clear */
+#define SRC_REG_SRC_PRTY_STS_CLR 0x404c0
 /* [R 4] Used to read the value of the XX protection CAM occupancy counter. */
 #define TCM_REG_CAM_OCCUP 0x5017c
 /* [RW 1] CDU AG read Interface enable. If 0 - the request input is
@@ -3596,8 +3644,12 @@
 #define TCM_REG_TCM_INT_MASK 0x501dc
 /* [R 11] Interrupt register #0 read */
 #define TCM_REG_TCM_INT_STS 0x501d0
+/* [RW 27] Parity mask register #0 read/write */
+#define TCM_REG_TCM_PRTY_MASK 0x501ec
 /* [R 27] Parity register #0 read */
 #define TCM_REG_TCM_PRTY_STS 0x501e0
+/* [RC 27] Parity register #0 read clear */
+#define TCM_REG_TCM_PRTY_STS_CLR 0x501e4
 /* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
    REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
    Is used to determine the number of the AG context REG-pairs written back;
@@ -3755,6 +3807,10 @@
 #define TM_REG_TM_INT_MASK 0x1640fc
 /* [R 1] Interrupt register #0 read */
 #define TM_REG_TM_INT_STS 0x1640f0
+/* [RW 7] Parity mask register #0 read/write */
+#define TM_REG_TM_PRTY_MASK 0x16410c
+/* [RC 7] Parity register #0 read clear */
+#define TM_REG_TM_PRTY_STS_CLR 0x164104
 /* [RW 8] The event id for aggregated interrupt 0 */
 #define TSDM_REG_AGG_INT_EVENT_0 0x42038
 #define TSDM_REG_AGG_INT_EVENT_1 0x4203c
@@ -3835,6 +3891,8 @@
 #define TSDM_REG_TSDM_PRTY_MASK 0x422bc
 /* [R 11] Parity register #0 read */
 #define TSDM_REG_TSDM_PRTY_STS 0x422b0
+/* [RC 11] Parity register #0 read clear */
+#define TSDM_REG_TSDM_PRTY_STS_CLR 0x422b4
 /* [RW 5] The number of time_slots in the arbitration cycle */
 #define TSEM_REG_ARB_CYCLE_SIZE 0x180034
 /* [RW 3] The source that is associated with arbitration element 0. Source
@@ -3914,6 +3972,9 @@
 #define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0
 /* [RW 8] List of free threads . There is a bit per thread. */
 #define TSEM_REG_THREADS_LIST 0x1802e4
+/* [RC 32] Parity register #0 read clear */
+#define TSEM_REG_TSEM_PRTY_STS_CLR_0 0x180118
+#define TSEM_REG_TSEM_PRTY_STS_CLR_1 0x180128
 /* [RW 3] The arbitration scheme of time_slot 0 */
 #define TSEM_REG_TS_0_AS 0x180038
 /* [RW 3] The arbitration scheme of time_slot 10 */
@@ -4116,6 +4177,8 @@
 #define UCM_REG_UCM_INT_STS 0xe01c8
 /* [R 27] Parity register #0 read */
 #define UCM_REG_UCM_PRTY_STS 0xe01d8
+/* [RC 27] Parity register #0 read clear */
+#define UCM_REG_UCM_PRTY_STS_CLR 0xe01dc
 /* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS
    REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
    Is used to determine the number of the AG context REG-pairs written back;
@@ -4292,6 +4355,8 @@
 #define USDM_REG_USDM_PRTY_MASK 0xc42c0
 /* [R 11] Parity register #0 read */
 #define USDM_REG_USDM_PRTY_STS 0xc42b4
+/* [RC 11] Parity register #0 read clear */
+#define USDM_REG_USDM_PRTY_STS_CLR 0xc42b8
 /* [RW 5] The number of time_slots in the arbitration cycle */
 #define USEM_REG_ARB_CYCLE_SIZE 0x300034
 /* [RW 3] The source that is associated with arbitration element 0. Source
@@ -4421,6 +4486,9 @@
 /* [R 32] Parity register #0 read */
 #define USEM_REG_USEM_PRTY_STS_0 0x300124
 #define USEM_REG_USEM_PRTY_STS_1 0x300134
+/* [RC 32] Parity register #0 read clear */
+#define USEM_REG_USEM_PRTY_STS_CLR_0 0x300128
+#define USEM_REG_USEM_PRTY_STS_CLR_1 0x300138
 /* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
  * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
 #define USEM_REG_VFPF_ERR_NUM 0x300380
@@ -4797,6 +4865,8 @@
 #define XSDM_REG_XSDM_PRTY_MASK 0x1662bc
 /* [R 11] Parity register #0 read */
 #define XSDM_REG_XSDM_PRTY_STS 0x1662b0
+/* [RC 11] Parity register #0 read clear */
+#define XSDM_REG_XSDM_PRTY_STS_CLR 0x1662b4
 /* [RW 5] The number of time_slots in the arbitration cycle */
 #define XSEM_REG_ARB_CYCLE_SIZE 0x280034
 /* [RW 3] The source that is associated with arbitration element 0. Source
@@ -4929,6 +4999,9 @@
 /* [R 32] Parity register #0 read */
 #define XSEM_REG_XSEM_PRTY_STS_0 0x280124
 #define XSEM_REG_XSEM_PRTY_STS_1 0x280134
+/* [RC 32] Parity register #0 read clear */
+#define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128
+#define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138
 #define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0)
 #define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1)
 #define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0)
@@ -6316,3 +6389,4 @@ static inline u8 calc_crc8(u32 data, u8 crc)
 }
 
+#endif /* BNX2X_REG_H */


@@ -158,6 +158,11 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
 
         spin_lock_bh(&bp->stats_lock);
 
+        if (bp->stats_pending) {
+            spin_unlock_bh(&bp->stats_lock);
+            return;
+        }
+
         ramrod_data.drv_counter = bp->stats_counter++;
         ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
         for_each_eth_queue(bp, i)
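The added early-out makes bnx2x_storm_stats_post() a no-op while a previous statistics ramrod is still outstanding; the test and the later counter update sit under the same stats_lock, so two contexts cannot both observe "not pending". A pthread sketch of this check-under-lock pattern (the harness and names are illustrative, only loosely mirroring bp->stats_lock and bp->stats_pending):

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static bool stats_pending;  /* a request is already in flight */
static int  stats_counter;

/* Returns 1 if a new request was posted, 0 if one is still pending. */
static int stats_post(void)
{
    pthread_mutex_lock(&stats_lock);
    if (stats_pending) {            /* the early-out from the hunk above */
        pthread_mutex_unlock(&stats_lock);
        return 0;
    }
    stats_counter++;                /* build the request under the lock */
    stats_pending = true;
    pthread_mutex_unlock(&stats_lock);
    return 1;
}

/* The completion path clears the flag, again under the lock. */
static void stats_complete(void)
{
    pthread_mutex_lock(&stats_lock);
    stats_pending = false;
    pthread_mutex_unlock(&stats_lock);
}

int main(void)
{
    printf("posted: %d\n", stats_post());   /* 1 */
    printf("posted: %d\n", stats_post());   /* 0: still pending */
    stats_complete();
    printf("posted: %d\n", stats_post());   /* 1 again */
    return 0;
}
```

Checking and updating the flag under one lock is what closes the dump_table/destroy-style race named in the shortlog: a lock-free check would let two posters interleave between test and set.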


@@ -749,13 +749,19 @@ static int cxgb4vf_open(struct net_device *dev)
     netif_set_real_num_tx_queues(dev, pi->nqsets);
     err = netif_set_real_num_rx_queues(dev, pi->nqsets);
     if (err)
-        return err;
-    set_bit(pi->port_id, &adapter->open_device_map);
+        goto err_unwind;
     err = link_start(dev);
     if (err)
-        return err;
+        goto err_unwind;
+
     netif_tx_start_all_queues(dev);
+    set_bit(pi->port_id, &adapter->open_device_map);
     return 0;
+
+err_unwind:
+    if (adapter->open_device_map == 0)
+        adapter_down(adapter);
+    return err;
 }
 
 /*
@@ -764,13 +770,12 @@ static int cxgb4vf_open(struct net_device *dev)
  */
 static int cxgb4vf_stop(struct net_device *dev)
 {
-    int ret;
     struct port_info *pi = netdev_priv(dev);
     struct adapter *adapter = pi->adapter;
 
     netif_tx_stop_all_queues(dev);
     netif_carrier_off(dev);
-    ret = t4vf_enable_vi(adapter, pi->viid, false, false);
+    t4vf_enable_vi(adapter, pi->viid, false, false);
     pi->link_cfg.link_ok = 0;
 
     clear_bit(pi->port_id, &adapter->open_device_map);


@@ -147,9 +147,20 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
     /*
      * Write the command array into the Mailbox Data register array and
      * transfer ownership of the mailbox to the firmware.
+     *
+     * For the VFs, the Mailbox Data "registers" are actually backed by
+     * T4's "MA" interface rather than PL Registers (as is the case for
+     * the PFs). Because these are in different coherency domains, the
+     * write to the VF's PL-register-backed Mailbox Control can race in
+     * front of the writes to the MA-backed VF Mailbox Data "registers".
+     * So we need to do a read-back on at least one byte of the VF Mailbox
+     * Data registers before doing the write to the VF Mailbox Control
+     * register.
      */
     for (i = 0, p = cmd; i < size; i += 8)
         t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
+    t4_read_reg(adapter, mbox_data);         /* flush write */
+
     t4_write_reg(adapter, mbox_ctl,
              MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
     t4_read_reg(adapter, mbox_ctl);          /* flush write */
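The comment added above documents why the read-back matters: the MA-backed data array and the PL-backed control register sit in different coherency domains, so the posted data writes can still be in flight when the control write hands the mailbox to firmware; reading back one data word forces those writes to complete first. A rough sketch of the ordering idiom (the accessors and control bits are invented stand-ins for t4_write_reg64/t4_read_reg; on real hardware the read-back works because a read response cannot overtake earlier posted writes on the same path):

```c
#include <stdint.h>

static inline void mmio_write64(volatile uint64_t *a, uint64_t v) { *a = v; }
static inline void mmio_write32(volatile uint32_t *a, uint32_t v) { *a = v; }
static inline uint32_t mmio_read32(const volatile uint32_t *a)   { return *a; }

#define MBOX_VALID    (1u << 0)   /* placeholder control bits, */
#define MBOX_OWNER_FW (1u << 1)   /* not the driver's encoding */

static void post_mailbox(volatile uint64_t *mbox_data,
                         volatile uint32_t *mbox_ctl,
                         const uint64_t *cmd, int size)
{
    /* 1) Stage the command into the (slower-domain) data registers. */
    for (int i = 0; i < size / 8; i++)
        mmio_write64(&mbox_data[i], cmd[i]);

    /* 2) Read back one data word: the read cannot complete until the
     *    posted data writes have landed, ordering them ahead of the
     *    control write that lives in the other coherency domain. */
    (void)mmio_read32((const volatile uint32_t *)mbox_data);

    /* 3) Only now transfer ownership to the firmware. */
    mmio_write32(mbox_ctl, MBOX_VALID | MBOX_OWNER_FW);
    (void)mmio_read32(mbox_ctl);  /* flush the control write as well */
}

int main(void)
{
    volatile uint64_t data[8] = {0};
    volatile uint32_t ctl = 0;
    const uint64_t cmd[2] = { 0x1122334455667788ull, 0x99aabbccddeeff00ull };

    post_mailbox(data, &ctl, cmd, (int)sizeof(cmd));
    return ctl == (MBOX_VALID | MBOX_OWNER_FW) ? 0 : 1;
}
```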


@@ -130,10 +130,15 @@ static s32 e1000_set_phy_type(struct e1000_hw *hw)
 		if (hw->mac_type == e1000_82541 ||
 		    hw->mac_type == e1000_82541_rev_2 ||
 		    hw->mac_type == e1000_82547 ||
-		    hw->mac_type == e1000_82547_rev_2) {
+		    hw->mac_type == e1000_82547_rev_2)
 			hw->phy_type = e1000_phy_igp;
 		break;
-		}
+	case RTL8211B_PHY_ID:
+		hw->phy_type = e1000_phy_8211;
+		break;
+	case RTL8201N_PHY_ID:
+		hw->phy_type = e1000_phy_8201;
+		break;
 	default:
 		/* Should never have loaded on this device */
 		hw->phy_type = e1000_phy_undefined;
@@ -318,6 +323,9 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
 	case E1000_DEV_ID_82547GI:
 		hw->mac_type = e1000_82547_rev_2;
 		break;
+	case E1000_DEV_ID_INTEL_CE4100_GBE:
+		hw->mac_type = e1000_ce4100;
+		break;
 	default:
 		/* Should never have loaded on this device */
 		return -E1000_ERR_MAC_TYPE;
@@ -372,6 +380,9 @@ void e1000_set_media_type(struct e1000_hw *hw)
 	case e1000_82542_rev2_1:
 		hw->media_type = e1000_media_type_fiber;
 		break;
+	case e1000_ce4100:
+		hw->media_type = e1000_media_type_copper;
+		break;
 	default:
 		status = er32(STATUS);
 		if (status & E1000_STATUS_TBIMODE) {
@@ -460,6 +471,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
 		/* Reset is performed on a shadow of the control register */
 		ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
 		break;
+	case e1000_ce4100:
 	default:
 		ew32(CTRL, (ctrl | E1000_CTRL_RST));
 		break;
@@ -951,6 +963,67 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
 	return E1000_SUCCESS;
 }
 
+/**
+ * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series.
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Commits changes to PHY configuration by calling e1000_phy_reset().
+ */
+static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	/* SW reset the PHY so all changes take effect */
+	ret_val = e1000_phy_reset(hw);
+	if (ret_val) {
+		e_dbg("Error Resetting the PHY\n");
+		return ret_val;
+	}
+
+	return E1000_SUCCESS;
+}
+
+static s32 gbe_dhg_phy_setup(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u32 ctrl_aux;
+
+	switch (hw->phy_type) {
+	case e1000_phy_8211:
+		ret_val = e1000_copper_link_rtl_setup(hw);
+		if (ret_val) {
+			e_dbg("e1000_copper_link_rtl_setup failed!\n");
+			return ret_val;
+		}
+		break;
+	case e1000_phy_8201:
+		/* Set RMII mode */
+		ctrl_aux = er32(CTL_AUX);
+		ctrl_aux |= E1000_CTL_AUX_RMII;
+		ew32(CTL_AUX, ctrl_aux);
+		E1000_WRITE_FLUSH();
+
+		/* Disable the J/K bits required for receive */
+		ctrl_aux = er32(CTL_AUX);
+		ctrl_aux |= 0x4;
+		ctrl_aux &= ~0x2;
+		ew32(CTL_AUX, ctrl_aux);
+		E1000_WRITE_FLUSH();
+		ret_val = e1000_copper_link_rtl_setup(hw);
+
+		if (ret_val) {
+			e_dbg("e1000_copper_link_rtl_setup failed!\n");
+			return ret_val;
+		}
+		break;
+	default:
+		e_dbg("Error Resetting the PHY\n");
+		return E1000_ERR_PHY_TYPE;
+	}
+
+	return E1000_SUCCESS;
+}
+
 /**
  * e1000_copper_link_preconfig - early configuration for copper
  * @hw: Struct containing variables accessed by shared code
@@ -1286,6 +1359,10 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
 	if (hw->autoneg_advertised == 0)
 		hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 
+	/* IFE/RTL8201N PHY only supports 10/100 */
+	if (hw->phy_type == e1000_phy_8201)
+		hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL;
+
 	e_dbg("Reconfiguring auto-neg advertisement params\n");
 	ret_val = e1000_phy_setup_autoneg(hw);
 	if (ret_val) {
@@ -1341,7 +1418,7 @@ static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
 	s32 ret_val;
 	e_dbg("e1000_copper_link_postconfig");
 
-	if (hw->mac_type >= e1000_82544) {
+	if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) {
 		e1000_config_collision_dist(hw);
 	} else {
 		ret_val = e1000_config_mac_to_phy(hw);
@@ -1395,6 +1472,12 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
 		ret_val = e1000_copper_link_mgp_setup(hw);
 		if (ret_val)
 			return ret_val;
+	} else {
+		ret_val = gbe_dhg_phy_setup(hw);
+		if (ret_val) {
+			e_dbg("gbe_dhg_phy_setup failed!\n");
+			return ret_val;
+		}
 	}
 
 	if (hw->autoneg) {
@@ -1461,10 +1544,11 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
 		return ret_val;
 
 	/* Read the MII 1000Base-T Control Register (Address 9). */
-	ret_val =
-	    e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+	ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
 	if (ret_val)
 		return ret_val;
+	else if (hw->phy_type == e1000_phy_8201)
+		mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
 
 	/* Need to parse both autoneg_advertised and fc and set up
 	 * the appropriate PHY registers.  First we will parse for
@@ -1577,9 +1661,14 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
 
 	e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
 
-	ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
-	if (ret_val)
-		return ret_val;
+	if (hw->phy_type == e1000_phy_8201) {
+		mii_1000t_ctrl_reg = 0;
+	} else {
+		ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL,
+					      mii_1000t_ctrl_reg);
+		if (ret_val)
+			return ret_val;
+	}
 
 	return E1000_SUCCESS;
 }
@@ -1860,7 +1949,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
 	/* 82544 or newer MAC, Auto Speed Detection takes care of
 	 * MAC speed/duplex configuration.*/
-	if (hw->mac_type >= e1000_82544)
+	if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100))
 		return E1000_SUCCESS;
 
 	/* Read the Device Control Register and set the bits to Force Speed
@@ -1870,27 +1959,49 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
 	ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
 	ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
 
-	/* Set up duplex in the Device Control and Transmit Control
-	 * registers depending on negotiated values.
-	 */
-	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
-	if (ret_val)
-		return ret_val;
-
-	if (phy_data & M88E1000_PSSR_DPLX)
-		ctrl |= E1000_CTRL_FD;
-	else
-		ctrl &= ~E1000_CTRL_FD;
-
-	e1000_config_collision_dist(hw);
-
-	/* Set up speed in the Device Control register depending on
-	 * negotiated values.
-	 */
-	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
-		ctrl |= E1000_CTRL_SPD_1000;
-	else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
-		ctrl |= E1000_CTRL_SPD_100;
+	switch (hw->phy_type) {
+	case e1000_phy_8201:
+		ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		if (phy_data & RTL_PHY_CTRL_FD)
+			ctrl |= E1000_CTRL_FD;
+		else
+			ctrl &= ~E1000_CTRL_FD;
+
+		if (phy_data & RTL_PHY_CTRL_SPD_100)
+			ctrl |= E1000_CTRL_SPD_100;
+		else
+			ctrl |= E1000_CTRL_SPD_10;
+
+		e1000_config_collision_dist(hw);
+		break;
+	default:
+		/* Set up duplex in the Device Control and Transmit Control
+		 * registers depending on negotiated values.
+		 */
+		ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+					     &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		if (phy_data & M88E1000_PSSR_DPLX)
+			ctrl |= E1000_CTRL_FD;
+		else
+			ctrl &= ~E1000_CTRL_FD;
+
+		e1000_config_collision_dist(hw);
+
+		/* Set up speed in the Device Control register depending on
+		 * negotiated values.
+		 */
+		if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+			ctrl |= E1000_CTRL_SPD_1000;
+		else if ((phy_data & M88E1000_PSSR_SPEED) ==
+			 M88E1000_PSSR_100MBS)
+			ctrl |= E1000_CTRL_SPD_100;
+	}
 
 	/* Write the configured values back to the Device Control Reg. */
 	ew32(CTRL, ctrl);
@@ -2401,7 +2512,8 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
 			 * speed/duplex on the MAC to the current PHY speed/duplex
 			 * settings.
 			 */
-			if (hw->mac_type >= e1000_82544)
+			if ((hw->mac_type >= e1000_82544) &&
+			    (hw->mac_type != e1000_ce4100))
 				e1000_config_collision_dist(hw);
 			else {
 				ret_val = e1000_config_mac_to_phy(hw);
@@ -2738,7 +2850,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
 {
 	u32 i;
 	u32 mdic = 0;
-	const u32 phy_addr = 1;
+	const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
 
 	e_dbg("e1000_read_phy_reg_ex");
 
@@ -2752,28 +2864,61 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
 		 * Control register.  The MAC will take care of interfacing with the
 		 * PHY to retrieve the desired data.
 		 */
-		mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
-			(phy_addr << E1000_MDIC_PHY_SHIFT) |
-			(E1000_MDIC_OP_READ));
+		if (hw->mac_type == e1000_ce4100) {
+			mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+				(phy_addr << E1000_MDIC_PHY_SHIFT) |
+				(INTEL_CE_GBE_MDIC_OP_READ) |
+				(INTEL_CE_GBE_MDIC_GO));
+
+			writel(mdic, E1000_MDIO_CMD);
+
+			/* Poll the ready bit to see if the MDI read
+			 * completed
+			 */
+			for (i = 0; i < 64; i++) {
+				udelay(50);
+				mdic = readl(E1000_MDIO_CMD);
+				if (!(mdic & INTEL_CE_GBE_MDIC_GO))
+					break;
+			}
+
+			if (mdic & INTEL_CE_GBE_MDIC_GO) {
+				e_dbg("MDI Read did not complete\n");
+				return -E1000_ERR_PHY;
+			}
+
+			mdic = readl(E1000_MDIO_STS);
+			if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) {
+				e_dbg("MDI Read Error\n");
+				return -E1000_ERR_PHY;
+			}
+			*phy_data = (u16) mdic;
+		} else {
+			mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+				(phy_addr << E1000_MDIC_PHY_SHIFT) |
+				(E1000_MDIC_OP_READ));
 
-		ew32(MDIC, mdic);
+			ew32(MDIC, mdic);
 
-		/* Poll the ready bit to see if the MDI read completed */
-		for (i = 0; i < 64; i++) {
-			udelay(50);
-			mdic = er32(MDIC);
-			if (mdic & E1000_MDIC_READY)
-				break;
-		}
-		if (!(mdic & E1000_MDIC_READY)) {
-			e_dbg("MDI Read did not complete\n");
-			return -E1000_ERR_PHY;
-		}
-		if (mdic & E1000_MDIC_ERROR) {
-			e_dbg("MDI Error\n");
-			return -E1000_ERR_PHY;
-		}
-		*phy_data = (u16) mdic;
+			/* Poll the ready bit to see if the MDI read
+			 * completed
+			 */
+			for (i = 0; i < 64; i++) {
+				udelay(50);
+				mdic = er32(MDIC);
+				if (mdic & E1000_MDIC_READY)
+					break;
+			}
+			if (!(mdic & E1000_MDIC_READY)) {
+				e_dbg("MDI Read did not complete\n");
+				return -E1000_ERR_PHY;
+			}
+			if (mdic & E1000_MDIC_ERROR) {
+				e_dbg("MDI Error\n");
+				return -E1000_ERR_PHY;
+			}
+			*phy_data = (u16) mdic;
+		}
 	} else {
 		/* We must first send a preamble through the MDIO pin to signal the
 		 * beginning of an MII instruction.  This is done by sending 32
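The CE4100 branch above uses a self-clearing "GO" bit: software writes the op-code together with INTEL_CE_GBE_MDIC_GO and polls until hardware clears it. A hedged, self-contained sketch of that poll-with-timeout shape follows; readl()/udelay() are the real kernel helpers (from <linux/io.h> and <linux/delay.h>), but the function itself and its parameters are invented for illustration.

static int mdio_wait_go_clear(void __iomem *cmd_reg, u32 go_bit)
{
	u32 v;
	int i;

	for (i = 0; i < 64; i++) {	/* ~3.2 ms worst case at 50 us steps */
		udelay(50);
		v = readl(cmd_reg);
		if (!(v & go_bit))
			return 0;	/* hardware finished the transaction */
	}
	return -ETIMEDOUT;		/* GO never self-cleared */
}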
@@ -2840,7 +2985,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
 {
 	u32 i;
 	u32 mdic = 0;
-	const u32 phy_addr = 1;
+	const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
 
 	e_dbg("e1000_write_phy_reg_ex");
 
@@ -2850,27 +2995,54 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
 	}
 
 	if (hw->mac_type > e1000_82543) {
-		/* Set up Op-code, Phy Address, register address, and data intended
-		 * for the PHY register in the MDI Control register.  The MAC will take
-		 * care of interfacing with the PHY to send the desired data.
+		/* Set up Op-code, Phy Address, register address, and data
+		 * intended for the PHY register in the MDI Control register.
+		 * The MAC will take care of interfacing with the PHY to send
+		 * the desired data.
 		 */
-		mdic = (((u32) phy_data) |
-			(reg_addr << E1000_MDIC_REG_SHIFT) |
-			(phy_addr << E1000_MDIC_PHY_SHIFT) |
-			(E1000_MDIC_OP_WRITE));
+		if (hw->mac_type == e1000_ce4100) {
+			mdic = (((u32) phy_data) |
+				(reg_addr << E1000_MDIC_REG_SHIFT) |
+				(phy_addr << E1000_MDIC_PHY_SHIFT) |
+				(INTEL_CE_GBE_MDIC_OP_WRITE) |
+				(INTEL_CE_GBE_MDIC_GO));
 
-		ew32(MDIC, mdic);
+			writel(mdic, E1000_MDIO_CMD);
 
-		/* Poll the ready bit to see if the MDI read completed */
-		for (i = 0; i < 641; i++) {
-			udelay(5);
-			mdic = er32(MDIC);
-			if (mdic & E1000_MDIC_READY)
-				break;
-		}
-		if (!(mdic & E1000_MDIC_READY)) {
-			e_dbg("MDI Write did not complete\n");
-			return -E1000_ERR_PHY;
+			/* Poll the ready bit to see if the MDI read
+			 * completed
+			 */
+			for (i = 0; i < 640; i++) {
+				udelay(5);
+				mdic = readl(E1000_MDIO_CMD);
+				if (!(mdic & INTEL_CE_GBE_MDIC_GO))
+					break;
+			}
+			if (mdic & INTEL_CE_GBE_MDIC_GO) {
+				e_dbg("MDI Write did not complete\n");
+				return -E1000_ERR_PHY;
+			}
+		} else {
+			mdic = (((u32) phy_data) |
+				(reg_addr << E1000_MDIC_REG_SHIFT) |
+				(phy_addr << E1000_MDIC_PHY_SHIFT) |
+				(E1000_MDIC_OP_WRITE));
+
+			ew32(MDIC, mdic);
+
+			/* Poll the ready bit to see if the MDI read
+			 * completed
+			 */
+			for (i = 0; i < 641; i++) {
+				udelay(5);
+				mdic = er32(MDIC);
+				if (mdic & E1000_MDIC_READY)
+					break;
+			}
+			if (!(mdic & E1000_MDIC_READY)) {
+				e_dbg("MDI Write did not complete\n");
+				return -E1000_ERR_PHY;
+			}
 		}
 	} else {
 		/* We'll need to use the SW defined pins to shift the write command
@@ -3048,6 +3220,11 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
 		if (hw->phy_id == M88E1011_I_PHY_ID)
 			match = true;
 		break;
+	case e1000_ce4100:
+		if ((hw->phy_id == RTL8211B_PHY_ID) ||
+		    (hw->phy_id == RTL8201N_PHY_ID))
+			match = true;
+		break;
 	case e1000_82541:
 	case e1000_82541_rev_2:
 	case e1000_82547:
@@ -3291,6 +3468,9 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
 	if (hw->phy_type == e1000_phy_igp)
 		return e1000_phy_igp_get_info(hw, phy_info);
+	else if ((hw->phy_type == e1000_phy_8211) ||
+		 (hw->phy_type == e1000_phy_8201))
+		return E1000_SUCCESS;
 	else
 		return e1000_phy_m88_get_info(hw, phy_info);
 }
@@ -3742,6 +3922,12 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
 
 	e_dbg("e1000_read_eeprom");
 
+	if (hw->mac_type == e1000_ce4100) {
+		GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words,
+				      data);
+		return E1000_SUCCESS;
+	}
+
 	/* If eeprom is not yet detected, do so now */
 	if (eeprom->word_size == 0)
 		e1000_init_eeprom_params(hw);
@@ -3904,6 +4090,12 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
 
 	e_dbg("e1000_write_eeprom");
 
+	if (hw->mac_type == e1000_ce4100) {
+		GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words,
+				       data);
+		return E1000_SUCCESS;
+	}
+
 	/* If eeprom is not yet detected, do so now */
 	if (eeprom->word_size == 0)
 		e1000_init_eeprom_params(hw);


@@ -52,6 +52,7 @@ typedef enum {
 	e1000_82545,
 	e1000_82545_rev_3,
 	e1000_82546,
+	e1000_ce4100,
 	e1000_82546_rev_3,
 	e1000_82541,
 	e1000_82541_rev_2,
@@ -209,9 +210,11 @@ typedef enum {
 } e1000_1000t_rx_status;
 
 typedef enum {
 	e1000_phy_m88 = 0,
 	e1000_phy_igp,
-	e1000_phy_undefined = 0xFF
+	e1000_phy_8211,
+	e1000_phy_8201,
+	e1000_phy_undefined = 0xFF
 } e1000_phy_type;
 
 typedef enum {
@@ -442,6 +445,7 @@ void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
 #define E1000_DEV_ID_82547EI             0x1019
 #define E1000_DEV_ID_82547EI_MOBILE      0x101A
 #define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
+#define E1000_DEV_ID_INTEL_CE4100_GBE    0x2E6E
 
 #define NODE_ADDRESS_SIZE 6
 #define ETH_LENGTH_OF_ADDRESS 6
@@ -808,6 +812,16 @@ struct e1000_ffvt_entry {
 #define E1000_CTRL_EXT 0x00018	/* Extended Device Control - RW */
 #define E1000_FLA      0x0001C	/* Flash Access - RW */
 #define E1000_MDIC     0x00020	/* MDI Control - RW */
+extern void __iomem *ce4100_gbe_mdio_base_virt;
+#define INTEL_CE_GBE_MDIO_RCOMP_BASE    (ce4100_gbe_mdio_base_virt)
+#define E1000_MDIO_STS  (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0)
+#define E1000_MDIO_CMD  (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4)
+#define E1000_MDIO_DRV  (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8)
+#define E1000_MDC_CMD   (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC)
+#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20)
+#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24)
 #define E1000_SCTL     0x00024	/* SerDes Control - RW */
 #define E1000_FEXTNVM  0x00028	/* Future Extended NVM register */
 #define E1000_FCAL     0x00028	/* Flow Control Address Low - RW */
@@ -820,6 +834,34 @@ struct e1000_ffvt_entry {
 #define E1000_IMS      0x000D0	/* Interrupt Mask Set - RW */
 #define E1000_IMC      0x000D8	/* Interrupt Mask Clear - WO */
 #define E1000_IAM      0x000E0	/* Interrupt Acknowledge Auto Mask */
+
+/* Auxiliary Control Register. This register is CE4100 specific,
+ * RMII/RGMII function is switched by this register - RW
+ * Following are bits definitions of the Auxiliary Control Register
+ */
+#define E1000_CTL_AUX  0x000E0
+#define E1000_CTL_AUX_END_SEL_SHIFT     10
+#define E1000_CTL_AUX_ENDIANESS_SHIFT   8
+#define E1000_CTL_AUX_RGMII_RMII_SHIFT  0
+
+/* descriptor and packet transfer use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_DES_PKT   (0x0 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* descriptor use CTL_AUX.ENDIANESS, packet use default */
+#define E1000_CTL_AUX_DES       (0x1 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* descriptor use default, packet use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_PKT       (0x2 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* all use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_ALL       (0x3 << E1000_CTL_AUX_END_SEL_SHIFT)
+
+#define E1000_CTL_AUX_RGMII     (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT)
+#define E1000_CTL_AUX_RMII      (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT)
+
+/* LW little endian, Byte big endian */
+#define E1000_CTL_AUX_LWLE_BBE  (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWLE_BLE  (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWBE_BBE  (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWBE_BLE  (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+
 #define E1000_RCTL     0x00100	/* RX Control - RW */
 #define E1000_RDTR1    0x02820	/* RX Delay Timer (1) - RW */
 #define E1000_RDBAL1   0x02900	/* RX Descriptor Base Address Low (1) - RW */
@@ -1011,6 +1053,7 @@ struct e1000_ffvt_entry {
  * in more current versions of the 8254x. Despite the difference in location,
 * the registers function in the same manner.
 */
+#define E1000_82542_CTL_AUX  E1000_CTL_AUX
 #define E1000_82542_CTRL     E1000_CTRL
 #define E1000_82542_CTRL_DUP E1000_CTRL_DUP
 #define E1000_82542_STATUS   E1000_STATUS
@@ -1571,6 +1614,11 @@ struct e1000_hw {
 #define E1000_MDIC_INT_EN    0x20000000
 #define E1000_MDIC_ERROR     0x40000000
 
+#define INTEL_CE_GBE_MDIC_OP_WRITE      0x04000000
+#define INTEL_CE_GBE_MDIC_OP_READ       0x00000000
+#define INTEL_CE_GBE_MDIC_GO            0x80000000
+#define INTEL_CE_GBE_MDIC_READ_ERROR    0x80000000
+
 #define E1000_KUMCTRLSTA_MASK           0x0000FFFF
 #define E1000_KUMCTRLSTA_OFFSET         0x001F0000
 #define E1000_KUMCTRLSTA_OFFSET_SHIFT   16
@@ -2871,6 +2919,11 @@ struct e1000_host_command_info {
 #define M88E1111_I_PHY_ID 0x01410CC0
 #define L1LXT971A_PHY_ID  0x001378E0
 
+#define RTL8211B_PHY_ID   0x001CC910
+#define RTL8201N_PHY_ID   0x8200
+#define RTL_PHY_CTRL_FD   0x0100	/* Full duplex.0=half; 1=full */
+#define RTL_PHY_CTRL_SPD_100    0x200000	/* Force 100Mb */
+
 /* Bits...
 * 15-5: page
 * 4-0: register offset


@@ -28,6 +28,12 @@
 
 #include "e1000.h"
 #include <net/ip6_checksum.h>
+#include <linux/io.h>
+
+/* Intel Media SOC GbE MDIO physical base address */
+static unsigned long ce4100_gbe_mdio_base_phy;
+/* Intel Media SOC GbE MDIO virtual base address */
+void __iomem *ce4100_gbe_mdio_base_virt;
 
 char e1000_driver_name[] = "e1000";
 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
@@ -79,6 +85,7 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
 	INTEL_E1000_ETHERNET_DEVICE(0x108A),
 	INTEL_E1000_ETHERNET_DEVICE(0x1099),
 	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
+	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
 	/* required last entry */
 	{0,}
 };
@@ -459,6 +466,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
 		case e1000_82545:
 		case e1000_82545_rev_3:
 		case e1000_82546:
+		case e1000_ce4100:
 		case e1000_82546_rev_3:
 		case e1000_82541:
 		case e1000_82541_rev_2:
@@ -573,6 +581,7 @@ void e1000_reset(struct e1000_adapter *adapter)
 	case e1000_82545:
 	case e1000_82545_rev_3:
 	case e1000_82546:
+	case e1000_ce4100:
 	case e1000_82546_rev_3:
 		pba = E1000_PBA_48K;
 		break;
@@ -894,6 +903,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	static int global_quad_port_a = 0; /* global ksp3 port a indication */
 	int i, err, pci_using_dac;
 	u16 eeprom_data = 0;
+	u16 tmp = 0;
 	u16 eeprom_apme_mask = E1000_EEPROM_APME;
 	int bars, need_ioport;
 
@@ -996,6 +1006,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 		goto err_sw_init;
 
 	err = -EIO;
+	if (hw->mac_type == e1000_ce4100) {
+		ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1);
+		ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy,
+		                                pci_resource_len(pdev, BAR_1));
+
+		if (!ce4100_gbe_mdio_base_virt)
+			goto err_mdio_ioremap;
+	}
 
 	if (hw->mac_type >= e1000_82543) {
 		netdev->features = NETIF_F_SG |
@@ -1135,6 +1153,20 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	adapter->wol = adapter->eeprom_wol;
 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
 
+	/* Auto detect PHY address */
+	if (hw->mac_type == e1000_ce4100) {
+		for (i = 0; i < 32; i++) {
+			hw->phy_addr = i;
+			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
+			if (tmp == 0 || tmp == 0xFF) {
+				if (i == 31)
+					goto err_eeprom;
+				continue;
+			} else
+				break;
+		}
+	}
+
 	/* reset the hardware with the new settings */
 	e1000_reset(adapter);
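The probe loop above scans all 32 possible MDIO addresses and treats an all-zero or all-one PHY_ID2 as "nothing answered here". A hedged sketch of the same scan, factored as a helper; read_id2() stands in for e1000_read_phy_reg(hw, PHY_ID2, ...) and is not a real driver function.

static int find_phy_addr(struct e1000_hw *hw,
			 s32 (*read_id2)(struct e1000_hw *, u16 *))
{
	u16 id2;
	u32 addr;

	for (addr = 0; addr < 32; addr++) {
		hw->phy_addr = addr;
		if (read_id2(hw, &id2))
			continue;		/* MDIO transaction failed */
		if (id2 != 0x0000 && id2 != 0xFFFF)
			return addr;		/* a PHY responded here */
	}
	return -1;				/* no PHY found on the bus */
}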
@@ -1171,6 +1203,8 @@ err_eeprom:
 	kfree(adapter->rx_ring);
 err_dma:
 err_sw_init:
+err_mdio_ioremap:
+	iounmap(ce4100_gbe_mdio_base_virt);
 	iounmap(hw->hw_addr);
 err_ioremap:
 	free_netdev(netdev);
@@ -1409,6 +1443,7 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
 	/* First rev 82545 and 82546 need to not allow any memory
 	 * write location to cross 64k boundary due to errata 23 */
 	if (hw->mac_type == e1000_82545 ||
+	    hw->mac_type == e1000_ce4100 ||
 	    hw->mac_type == e1000_82546) {
 		return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
 	}


@@ -34,12 +34,21 @@
 #ifndef _E1000_OSDEP_H_
 #define _E1000_OSDEP_H_
 
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
 #include <asm/io.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
+
+#define CONFIG_RAM_BASE         0x60000
+#define GBE_CONFIG_OFFSET       0x0
+
+#define GBE_CONFIG_RAM_BASE \
+	((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET))
+
+#define GBE_CONFIG_BASE_VIRT    phys_to_virt(GBE_CONFIG_RAM_BASE)
+
+#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \
+	(iowrite16_rep(base + offset, data, count))
+
+#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \
+	(ioread16_rep(base + (offset << 1), data, count))
 
 #define er32(reg) \
 	(readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \
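On CE4100 the "EEPROM" contents live in memory-mapped flash, so the eeprom read path becomes a repeated 16-bit MMIO copy. An illustrative expansion of a GBE_CONFIG_FLASH_READ() call site as used by e1000_do_read_eeprom() earlier in this series; offset and words are in 16-bit EEPROM words, which is why the read side shifts the offset by one to get a byte address. The wrapper function itself is a sketch, not driver code, and assumes <linux/io.h>.

static void read_config_words(void __iomem *base, u16 offset, u16 words,
			      u16 *data)
{
	/* equivalent of GBE_CONFIG_FLASH_READ(base, offset, words, data) */
	ioread16_rep(base + (offset << 1), data, words);
}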


@@ -78,6 +78,8 @@ static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
 static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
 static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
 static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
+static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active);
+static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active);
 
 /**
  *  e1000_init_phy_params_82571 - Init PHY func ptrs.
@@ -113,6 +115,8 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
 		phy->type = e1000_phy_bm;
 		phy->ops.acquire = e1000_get_hw_semaphore_82574;
 		phy->ops.release = e1000_put_hw_semaphore_82574;
+		phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574;
+		phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574;
 		break;
 	default:
 		return -E1000_ERR_PHY;
@@ -121,29 +125,36 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
 
 	/* This can only be done after all function pointers are setup. */
 	ret_val = e1000_get_phy_id_82571(hw);
+	if (ret_val) {
+		e_dbg("Error getting PHY ID\n");
+		return ret_val;
+	}
 
 	/* Verify phy id */
 	switch (hw->mac.type) {
 	case e1000_82571:
 	case e1000_82572:
 		if (phy->id != IGP01E1000_I_PHY_ID)
-			return -E1000_ERR_PHY;
+			ret_val = -E1000_ERR_PHY;
 		break;
 	case e1000_82573:
 		if (phy->id != M88E1111_I_PHY_ID)
-			return -E1000_ERR_PHY;
+			ret_val = -E1000_ERR_PHY;
 		break;
 	case e1000_82574:
 	case e1000_82583:
 		if (phy->id != BME1000_E_PHY_ID_R2)
-			return -E1000_ERR_PHY;
+			ret_val = -E1000_ERR_PHY;
 		break;
 	default:
-		return -E1000_ERR_PHY;
+		ret_val = -E1000_ERR_PHY;
 		break;
 	}
 
-	return 0;
+	if (ret_val)
+		e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id);
+
+	return ret_val;
 }
 
 /**
@@ -648,6 +659,58 @@ static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
 	mutex_unlock(&swflag_mutex);
 }
 
+/**
+ *  e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state
+ *  @hw: pointer to the HW structure
+ *  @active: true to enable LPLU, false to disable
+ *
+ *  Sets the LPLU D0 state according to the active flag.
+ *  LPLU will not be activated unless the
+ *  device autonegotiation advertisement meets standards of
+ *  either 10 or 10/100 or 10/100/1000 at all duplexes.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+	u16 data = er32(POEMB);
+
+	if (active)
+		data |= E1000_PHY_CTRL_D0A_LPLU;
+	else
+		data &= ~E1000_PHY_CTRL_D0A_LPLU;
+
+	ew32(POEMB, data);
+	return 0;
+}
+
+/**
+ *  e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  when active is true, else clear lplu for D3. LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.
+ **/
+static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+	u16 data = er32(POEMB);
+
+	if (!active) {
+		data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+	} else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+		   (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) ||
+		   (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) {
+		data |= E1000_PHY_CTRL_NOND0A_LPLU;
+	}
+
+	ew32(POEMB, data);
+	return 0;
+}
+
 /**
  *  e1000_acquire_nvm_82571 - Request for access to the EEPROM
  *  @hw: pointer to the HW structure
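How the new LPLU hooks get exercised (hedged sketch): the e1000e PHY core calls through phy->ops rather than the 82574 functions directly, so wiring the two pointers in init_phy_params is all that is needed. The helper below is illustrative only, not actual e1000e code.

static s32 phy_enter_d0_lplu(struct e1000_hw *hw, bool active)
{
	if (!hw->phy.ops.set_d0_lplu_state)
		return 0;	/* MAC type without LPLU control */
	return hw->phy.ops.set_d0_lplu_state(hw, active);
}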
@ -956,7 +1019,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
**/ **/
static s32 e1000_reset_hw_82571(struct e1000_hw *hw) static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
{ {
u32 ctrl, ctrl_ext, icr; u32 ctrl, ctrl_ext;
s32 ret_val; s32 ret_val;
/* /*
@ -1040,7 +1103,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
/* Clear any pending interrupt events. */ /* Clear any pending interrupt events. */
ew32(IMC, 0xffffffff); ew32(IMC, 0xffffffff);
icr = er32(ICR); er32(ICR);
if (hw->mac.type == e1000_82571) { if (hw->mac.type == e1000_82571) {
/* Install any alternate MAC address into RAR0 */ /* Install any alternate MAC address into RAR0 */


@@ -38,6 +38,7 @@
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/pci-aspm.h>
+#include <linux/crc32.h>
 
 #include "hw.h"
 
@@ -496,6 +497,8 @@ extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
 extern void e1000e_update_stats(struct e1000_adapter *adapter);
 extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
 extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
+extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
 extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
 
 extern unsigned int copybreak;


@@ -784,7 +784,7 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
 **/
 static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
 {
-	u32 ctrl, icr;
+	u32 ctrl;
 	s32 ret_val;
 
 	/*
@@ -818,7 +818,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
 
 	/* Clear any pending interrupt events. */
 	ew32(IMC, 0xffffffff);
-	icr = er32(ICR);
+	er32(ICR);
 
 	ret_val = e1000_check_alt_mac_addr_generic(hw);


@@ -624,20 +624,24 @@ static void e1000_get_drvinfo(struct net_device *netdev,
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	char firmware_version[32];
 
-	strncpy(drvinfo->driver, e1000e_driver_name, 32);
-	strncpy(drvinfo->version, e1000e_driver_version, 32);
+	strncpy(drvinfo->driver, e1000e_driver_name,
+		sizeof(drvinfo->driver) - 1);
+	strncpy(drvinfo->version, e1000e_driver_version,
+		sizeof(drvinfo->version) - 1);
 
 	/*
 	 * EEPROM image version # is reported as firmware version # for
 	 * PCI-E controllers
 	 */
-	sprintf(firmware_version, "%d.%d-%d",
+	snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
 		(adapter->eeprom_vers & 0xF000) >> 12,
 		(adapter->eeprom_vers & 0x0FF0) >> 4,
 		(adapter->eeprom_vers & 0x000F));
 
-	strncpy(drvinfo->fw_version, firmware_version, 32);
-	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	strncpy(drvinfo->fw_version, firmware_version,
+		sizeof(drvinfo->fw_version) - 1);
+	strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info) - 1);
 
 	drvinfo->regdump_len = e1000_get_regs_len(netdev);
 	drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
 }
@@ -1704,6 +1708,19 @@ static void e1000_diag_test(struct net_device *netdev,
 	bool if_running = netif_running(netdev);
 
 	set_bit(__E1000_TESTING, &adapter->state);
+
+	if (!if_running) {
+		/* Get control of and reset hardware */
+		if (adapter->flags & FLAG_HAS_AMT)
+			e1000e_get_hw_control(adapter);
+
+		e1000e_power_up_phy(adapter);
+
+		adapter->hw.phy.autoneg_wait_to_complete = 1;
+		e1000e_reset(adapter);
+		adapter->hw.phy.autoneg_wait_to_complete = 0;
+	}
+
 	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
 		/* Offline tests */
 
@@ -1717,8 +1734,6 @@ static void e1000_diag_test(struct net_device *netdev,
 		if (if_running)
 			/* indicate we're in test mode */
 			dev_close(netdev);
-		else
-			e1000e_reset(adapter);
 
 		if (e1000_reg_test(adapter, &data[0]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1732,8 +1747,6 @@ static void e1000_diag_test(struct net_device *netdev,
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		e1000e_reset(adapter);
-		/* make sure the phy is powered up */
-		e1000e_power_up_phy(adapter);
 		if (e1000_loopback_test(adapter, &data[3]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
@@ -1755,28 +1768,29 @@ static void e1000_diag_test(struct net_device *netdev,
 		if (if_running)
 			dev_open(netdev);
 	} else {
-		if (!if_running && (adapter->flags & FLAG_HAS_AMT)) {
-			clear_bit(__E1000_TESTING, &adapter->state);
-			dev_open(netdev);
-			set_bit(__E1000_TESTING, &adapter->state);
-		}
+		/* Online tests */
 
 		e_info("online testing starting\n");
-		/* Online tests */
-		if (e1000_link_test(adapter, &data[4]))
-			eth_test->flags |= ETH_TEST_FL_FAILED;
 
-		/* Online tests aren't run; pass by default */
+		/* register, eeprom, intr and loopback tests not run online */
 		data[0] = 0;
 		data[1] = 0;
 		data[2] = 0;
 		data[3] = 0;
 
-		if (!if_running && (adapter->flags & FLAG_HAS_AMT))
-			dev_close(netdev);
+		if (e1000_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		clear_bit(__E1000_TESTING, &adapter->state);
 	}
+
+	if (!if_running) {
+		e1000e_reset(adapter);
+
+		if (adapter->flags & FLAG_HAS_AMT)
+			e1000e_release_hw_control(adapter);
+	}
+
 	msleep_interruptible(4 * 1000);
 }
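The restructured flow above brackets an offline self-test run on a closed interface: take hardware control from the firmware, power the PHY and reset before testing, then reset and hand control back afterwards. A hedged outline of that bracketing shape, with the test body abstracted behind a callback (run_tests and this helper are invented for illustration; the e1000e_* calls are the driver's real ones):

static void diag_test_bracket(struct e1000_adapter *adapter, bool if_running,
			      void (*run_tests)(struct e1000_adapter *))
{
	if (!if_running) {
		if (adapter->flags & FLAG_HAS_AMT)
			e1000e_get_hw_control(adapter);	/* take h/w from f/w */
		e1000e_power_up_phy(adapter);
		e1000e_reset(adapter);
	}

	run_tests(adapter);

	if (!if_running) {
		e1000e_reset(adapter);			/* back to known state */
		if (adapter->flags & FLAG_HAS_AMT)
			e1000e_release_hw_control(adapter);
	}
}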


@@ -83,6 +83,7 @@ enum e1e_registers {
 	E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */
 	E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */
 	E1000_PHY_CTRL    = 0x00F10, /* PHY Control Register in CSR */
+#define E1000_POEMB	E1000_PHY_CTRL	/* PHY OEM Bits */
 	E1000_PBA         = 0x01000, /* Packet Buffer Allocation - RW */
 	E1000_PBS         = 0x01008, /* Packet Buffer Size */
 	E1000_EEMNGCTL    = 0x01010, /* MNG EEprom Control */


@@ -1395,22 +1395,6 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
 	}
 }
 
-static u32 e1000_calc_rx_da_crc(u8 mac[])
-{
-	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
-	u32 i, j, mask, crc;
-
-	crc = 0xffffffff;
-	for (i = 0; i < 6; i++) {
-		crc = crc ^ mac[i];
-		for (j = 8; j > 0; j--) {
-			mask = (crc & 1) * (-1);
-			crc = (crc >> 1) ^ (poly & mask);
-		}
-	}
-	return ~crc;
-}
-
 /**
  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
  *  with 82579 PHY
@@ -1453,8 +1437,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
 		mac_addr[4] = (addr_high & 0xFF);
 		mac_addr[5] = ((addr_high >> 8) & 0xFF);
 
-		ew32(PCH_RAICC(i),
-				e1000_calc_rx_da_crc(mac_addr));
+		ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
 	}
 
 	/* Write Rx addresses to the PHY */
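Why the helper could be deleted: the open-coded loop was just a bitwise little-endian CRC-32 over the 6-byte MAC, which the kernel already provides as ether_crc_le() (hence the new <linux/crc32.h> include above). A stand-alone sketch of the equivalence, runnable in user space; the sample MAC address is arbitrary.

#include <stdint.h>
#include <stdio.h>

static uint32_t crc32_le_bitwise(const uint8_t *p, int len)
{
	uint32_t crc = 0xffffffff;
	int i, j;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (j = 0; j < 8; j++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
	}
	return crc;	/* like ether_crc_le(): no final invert */
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x3c, 0x4d, 0x5e };

	/* the driver programs ~crc into PCH_RAICC, matching the old helper */
	printf("RAICC value: 0x%08x\n",
	       (unsigned int)~crc32_le_bitwise(mac, 6));
	return 0;
}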
@@ -2977,7 +2960,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 {
 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
 	u16 reg;
-	u32 ctrl, icr, kab;
+	u32 ctrl, kab;
 	s32 ret_val;
 
 	/*
@@ -3067,7 +3050,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 		ew32(CRC_OFFSET, 0x65656565);
 
 	ew32(IMC, 0xffffffff);
-	icr = er32(ICR);
+	er32(ICR);
 
 	kab = er32(KABGTXD);
 	kab |= E1000_KABGTXD_BGSQLBIAS;
@@ -3118,7 +3101,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
 	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
 	 */
 	if (hw->phy.type == e1000_phy_82578) {
-		hw->phy.ops.read_reg(hw, BM_WUC, &i);
+		e1e_rphy(hw, BM_WUC, &i);
 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
 		if (ret_val)
 			return ret_val;
@@ -3276,9 +3259,8 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
 	    (hw->phy.type == e1000_phy_82577)) {
 		ew32(FCRTV_PCH, hw->fc.refresh_time);
 
-		ret_val = hw->phy.ops.write_reg(hw,
-		                             PHY_REG(BM_PORT_CTRL_PAGE, 27),
-		                             hw->fc.pause_time);
+		ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
+				   hw->fc.pause_time);
 		if (ret_val)
 			return ret_val;
 	}
@@ -3342,8 +3324,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
 			return ret_val;
 		break;
 	case e1000_phy_ife:
-		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
-		                               &reg_data);
+		ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
 		if (ret_val)
 			return ret_val;
 
@@ -3361,8 +3342,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
 			reg_data |= IFE_PMC_AUTO_MDIX;
 			break;
 		}
-		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
-		                                reg_data);
+		ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
 		if (ret_val)
 			return ret_val;
 		break;
@@ -3646,7 +3626,8 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
 {
 	if (hw->phy.type == e1000_phy_ife)
 		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
-			       (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
+				(IFE_PSCL_PROBE_MODE |
+				 IFE_PSCL_PROBE_LEDS_OFF));
 
 	ew32(LEDCTL, hw->mac.ledctl_mode1);
 	return 0;
@@ -3660,8 +3641,7 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
 **/
 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
 {
-	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
-					(u16)hw->mac.ledctl_mode1);
+	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
 }
 
 /**
@@ -3672,8 +3652,7 @@ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
 **/
 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
 {
-	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
-					(u16)hw->mac.ledctl_default);
+	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
 }
 
 /**
@@ -3704,7 +3683,7 @@ static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
 		}
 	}
 
-	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
+	return e1e_wphy(hw, HV_LED_CONFIG, data);
 }
 
 /**
@@ -3735,7 +3714,7 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
 		}
 	}
 
-	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
+	return e1e_wphy(hw, HV_LED_CONFIG, data);
 }
 
 /**
@@ -3844,20 +3823,20 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
 	if ((hw->phy.type == e1000_phy_82578) ||
 	    (hw->phy.type == e1000_phy_82579) ||
 	    (hw->phy.type == e1000_phy_82577)) {
-		hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data);
+		e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
+		e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
+		e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
+		e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
+		e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
+		e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
+		e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
+		e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
+		e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
+		e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
+		e1e_rphy(hw, HV_DC_UPPER, &phy_data);
+		e1e_rphy(hw, HV_DC_LOWER, &phy_data);
+		e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
+		e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
 	}
 }


@@ -1135,7 +1135,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
 		ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
 		if (ret_val)
 			return ret_val;
-		ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
+		ret_val =
+		    e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
 		if (ret_val)
 			return ret_val;


@@ -1980,15 +1980,15 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
 }
 
 /**
- * e1000_get_hw_control - get control of the h/w from f/w
+ * e1000e_get_hw_control - get control of the h/w from f/w
  * @adapter: address of board private structure
 *
- * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
 **/
-static void e1000_get_hw_control(struct e1000_adapter *adapter)
+void e1000e_get_hw_control(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl_ext;
@@ -2005,16 +2005,16 @@ static void e1000_get_hw_control(struct e1000_adapter *adapter)
 }
 
 /**
- * e1000_release_hw_control - release control of the h/w to f/w
+ * e1000e_release_hw_control - release control of the h/w to f/w
  * @adapter: address of board private structure
 *
- * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573) i
 * of the f/w this means that the network i/f is closed.
 *
 **/
-static void e1000_release_hw_control(struct e1000_adapter *adapter)
+void e1000e_release_hw_control(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl_ext;
@@ -2445,7 +2445,7 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
 	    (vid == adapter->mng_vlan_id)) {
 		/* release control to f/w */
-		e1000_release_hw_control(adapter);
+		e1000e_release_hw_control(adapter);
 		return;
 	}
 
@@ -2734,6 +2734,9 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
 		else
 			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+
+		if (ret_val)
+			e_dbg("failed to enable jumbo frame workaround mode\n");
 	}
 
 	/* Program MC offset vector base */
@@ -3184,7 +3187,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
 		ew32(PBA, pba);
 	}
 
-
 	/*
 	 * flow control settings
 	 *
@@ -3272,7 +3274,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 	 * that the network interface is in control
 	 */
 	if (adapter->flags & FLAG_HAS_AMT)
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 
 	ew32(WUC, 0);
 
@@ -3285,6 +3287,13 @@ void e1000e_reset(struct e1000_adapter *adapter)
 		ew32(VET, ETH_P_8021Q);
 
 	e1000e_reset_adaptive(hw);
+
+	if (!netif_running(adapter->netdev) &&
+	    !test_bit(__E1000_TESTING, &adapter->state)) {
+		e1000_power_down_phy(adapter);
+		return;
+	}
+
 	e1000_get_phy_info(hw);
 
 	if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
@@ -3570,7 +3579,7 @@ static int e1000_open(struct net_device *netdev)
 	 * interface is now open and reset the part to a known state.
 	 */
 	if (adapter->flags & FLAG_HAS_AMT) {
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 		e1000e_reset(adapter);
 	}
 
@@ -3634,7 +3643,7 @@ static int e1000_open(struct net_device *netdev)
 	return 0;
 
 err_req_irq:
-	e1000_release_hw_control(adapter);
+	e1000e_release_hw_control(adapter);
 	e1000_power_down_phy(adapter);
 	e1000e_free_rx_resources(adapter);
 err_setup_rx:
@@ -3689,8 +3698,9 @@ static int e1000_close(struct net_device *netdev)
 	 * If AMT is enabled, let the firmware know that the network
 	 * interface is now closed
 	 */
-	if (adapter->flags & FLAG_HAS_AMT)
-		e1000_release_hw_control(adapter);
+	if ((adapter->flags & FLAG_HAS_AMT) &&
+	    !test_bit(__E1000_TESTING, &adapter->state))
+		e1000e_release_hw_control(adapter);
 
 	if ((adapter->flags & FLAG_HAS_ERT) ||
 	    (adapter->hw.mac.type == e1000_pch2lan))
@@ -5209,7 +5219,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
 	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant.
 	 */
-	e1000_release_hw_control(adapter);
+	e1000e_release_hw_control(adapter);
 
 	pci_disable_device(pdev);
 
@@ -5366,7 +5376,7 @@ static int __e1000_resume(struct pci_dev *pdev)
 	 * under the control of the driver.
 	 */
 	if (!(adapter->flags & FLAG_HAS_AMT))
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 
 	return 0;
 }
@@ -5613,7 +5623,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
 	 * under the control of the driver.
 	 */
 	if (!(adapter->flags & FLAG_HAS_AMT))
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 
 }
 
@@ -5636,7 +5646,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
 	ret_val = e1000_read_pba_string_generic(hw, pba_str,
 						E1000_PBANUM_LENGTH);
 	if (ret_val)
-		strcpy(pba_str, "Unknown");
+		strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);
 	e_info("MAC: %d, PHY: %d, PBA No: %s\n",
 	       hw->mac.type, hw->phy.type, pba_str);
 }
@@ -5963,9 +5973,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	 * under the control of the driver.
 	 */
 	if (!(adapter->flags & FLAG_HAS_AMT))
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 
-	strcpy(netdev->name, "eth%d");
+	strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1);
 	err = register_netdev(netdev);
 	if (err)
 		goto err_register;
@@ -5982,12 +5992,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
 err_register:
 	if (!(adapter->flags & FLAG_HAS_AMT))
-		e1000_release_hw_control(adapter);
+		e1000e_release_hw_control(adapter);
 err_eeprom:
 	if (!e1000_check_reset_block(&adapter->hw))
 		e1000_phy_hw_reset(&adapter->hw);
 err_hw_init:
-
 	kfree(adapter->tx_ring);
 	kfree(adapter->rx_ring);
 err_sw_init:
@@ -6053,7 +6062,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
 	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant.
 	 */
-	e1000_release_hw_control(adapter);
+	e1000e_release_hw_control(adapter);
 
 	e1000e_reset_interrupt_capability(adapter);
 	kfree(adapter->tx_ring);


@@ -637,12 +637,11 @@ s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
 **/
 s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
 {
-	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
 	u16 phy_data;
 
 	/* Enable CRS on TX. This must be set for half-duplex operation. */
-	ret_val = phy->ops.read_reg(hw, I82577_CFG_REG, &phy_data);
+	ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data);
 	if (ret_val)
 		goto out;
 
@@ -651,7 +650,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
 	/* Enable downshift */
 	phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
 
-	ret_val = phy->ops.write_reg(hw, I82577_CFG_REG, phy_data);
+	ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data);
 
 out:
 	return ret_val;
@@ -774,16 +773,14 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
 	}
 
 	if (phy->type == e1000_phy_82578) {
-		ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
-		                            &phy_data);
+		ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
 		if (ret_val)
 			return ret_val;
 
 		/* 82578 PHY - set the downshift count to 1x. */
 		phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
 		phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
-		ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
-		                             phy_data);
+		ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
 		if (ret_val)
 			return ret_val;
 	}
@@ -1319,9 +1316,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 			 * We didn't get link.
 			 * Reset the DSP and cross our fingers.
 			 */
-			ret_val = e1e_wphy(hw,
-					   M88E1000_PHY_PAGE_SELECT,
-					   0x001d);
+			ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
+					   0x001d);
 			if (ret_val)
 				return ret_val;
 			ret_val = e1000e_phy_reset_dsp(hw);
@@ -3071,12 +3067,12 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
 		goto out;
 
 	/* Do not apply workaround if in PHY loopback bit 14 set */
-	hw->phy.ops.read_reg(hw, PHY_CONTROL, &data);
+	e1e_rphy(hw, PHY_CONTROL, &data);
 	if (data & PHY_CONTROL_LB)
 		goto out;
 
 	/* check if link is up and at 1Gbps */
-	ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data);
+	ret_val = e1e_rphy(hw, BM_CS_STATUS, &data);
 	if (ret_val)
 		goto out;
 
@@ -3092,14 +3088,12 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
 	mdelay(200);
 
 	/* flush the packets in the fifo buffer */
-	ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
-					HV_MUX_DATA_CTRL_GEN_TO_MAC |
-					HV_MUX_DATA_CTRL_FORCE_SPEED);
+	ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC |
+			   HV_MUX_DATA_CTRL_FORCE_SPEED);
 	if (ret_val)
 		goto out;
 
-	ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
-					HV_MUX_DATA_CTRL_GEN_TO_MAC);
+	ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC);
 
 out:
 	return ret_val;
@@ -3119,7 +3113,7 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw)
 	s32 ret_val;
 	u16 data;
 
-	ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+	ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
 
 	if (!ret_val)
 		phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
@@ -3142,13 +3136,13 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
 	u16 phy_data;
 	bool link;
 
-	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
 	if (ret_val)
 		goto out;
 
 	e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
 
-	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
 	if (ret_val)
 		goto out;
 
@@ -3212,7 +3206,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
 	if (ret_val)
 		goto out;
 
-	ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+	ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
 	if (ret_val)
 		goto out;
 
@@ -3224,7 +3218,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
 	if (ret_val)
 		goto out;
 
-	ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+	ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
 	if (ret_val)
 		goto out;
 
@@ -3258,7 +3252,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
 	s32 ret_val;
 	u16 phy_data, length;
 
-	ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
+	ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data);
 	if (ret_val)
 		goto out;


@@ -40,7 +40,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0106"
+#define DRV_VERSION	"EHEA_0107"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1

--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c

@@ -437,7 +437,7 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
 		}
 	}
 	/* Ring doorbell */
-	ehea_update_rq1a(pr->qp, i);
+	ehea_update_rq1a(pr->qp, i - 1);
 }
 
 static int ehea_refill_rq_def(struct ehea_port_res *pr,
@@ -1329,9 +1329,7 @@ static int ehea_fill_port_res(struct ehea_port_res *pr)
 	int ret;
 	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;
 
-	ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
-			       - init_attr->act_nr_rwqes_rq2
-			       - init_attr->act_nr_rwqes_rq3 - 1);
+	ehea_init_fill_rq1(pr, pr->rq1_skba.len);
 
 	ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c

@@ -17,6 +17,8 @@
  *
  * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
  * Copyright (c) 2004-2006 Macq Electronique SA.
+ *
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
  */
 
 #include <linux/module.h>
@@ -45,29 +47,41 @@
 #include <asm/cacheflush.h>
 
-#ifndef CONFIG_ARCH_MXC
+#ifndef CONFIG_ARM
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
 #endif
 
 #include "fec.h"
 
-#ifdef CONFIG_ARCH_MXC
-#include <mach/hardware.h>
+#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
 #define FEC_ALIGNMENT	0xf
 #else
 #define FEC_ALIGNMENT	0x3
 #endif
 
-/*
- * Define the fixed address of the FEC hardware.
- */
-#if defined(CONFIG_M5272)
-static unsigned char	fec_mac_default[] = {
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+#define DRIVER_NAME	"fec"
+
+/* Controller is ENET-MAC */
+#define FEC_QUIRK_ENET_MAC	(1 << 0)
+/* Controller needs driver to swap frame */
+#define FEC_QUIRK_SWAP_FRAME	(1 << 1)
+
+static struct platform_device_id fec_devtype[] = {
+	{
+		.name = DRIVER_NAME,
+		.driver_data = 0,
+	}, {
+		.name = "imx28-fec",
+		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
+	}
 };
 
+static unsigned char macaddr[ETH_ALEN];
+module_param_array(macaddr, byte, NULL, 0);
+MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
+
+#if defined(CONFIG_M5272)
 /*
  * Some hardware gets it MAC address out of local flash memory.
  * if this is non-zero then assume it is the address to get MAC from.
@@ -133,7 +147,8 @@ static unsigned char fec_mac_default[] = {
  * account when setting it.
  */
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
+    defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
+    defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
 #define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
 #else
 #define	OPT_FRAME_SIZE	0
@@ -186,7 +201,6 @@ struct fec_enet_private {
 	int	mii_timeout;
 	uint	phy_speed;
 	phy_interface_t	phy_interface;
-	int	index;
 	int	link;
 	int	full_duplex;
 	struct	completion mdio_done;
@@ -213,10 +227,23 @@ static void fec_stop(struct net_device *dev);
 /* Transmitter timeout */
 #define TX_TIMEOUT (2 * HZ)
 
+static void *swap_buffer(void *bufaddr, int len)
+{
+	int i;
+	unsigned int *buf = bufaddr;
+
+	for (i = 0; i < (len + 3) / 4; i++, buf++)
+		*buf = cpu_to_be32(*buf);
+
+	return bufaddr;
+}
+
 static netdev_tx_t
 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
 	struct bufdesc *bdp;
 	void *bufaddr;
 	unsigned short	status;
@@ -261,6 +288,14 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		bufaddr = fep->tx_bounce[index];
 	}
 
+	/*
+	 * Some design made an incorrect assumption on endian mode of
+	 * the system that it's running on. As the result, driver has to
+	 * swap every frame going to and coming from the controller.
+	 */
+	if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+		swap_buffer(bufaddr, skb->len);
+
 	/* Save skb pointer */
 	fep->tx_skbuff[fep->skb_cur] = skb;
 
@@ -429,6 +464,8 @@ static void
 fec_enet_rx(struct net_device *dev)
 {
 	struct	fec_enet_private *fep = netdev_priv(dev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
 	struct bufdesc *bdp;
 	unsigned short status;
 	struct	sk_buff	*skb;
@@ -492,6 +529,9 @@ fec_enet_rx(struct net_device *dev)
 		dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
 				DMA_FROM_DEVICE);
 
+		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+			swap_buffer(data, pkt_len);
+
 		/* This does 16 byte alignment, exactly what we need.
 		 * The packet length includes FCS, but we don't want to
 		 * include that when passing upstream as it messes up
@@ -538,37 +578,50 @@ rx_processing_done:
 }
 
 /* ------------------------------------------------------------------------- */
-#ifdef CONFIG_M5272
 static void __inline__ fec_get_mac(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
 	unsigned char *iap, tmpaddr[ETH_ALEN];
 
-	if (FEC_FLASHMAC) {
-		/*
-		 * Get MAC address from FLASH.
-		 * If it is all 1's or 0's, use the default.
-		 */
-		iap = (unsigned char *)FEC_FLASHMAC;
-		if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
-		    (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
-			iap = fec_mac_default;
-		if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
-		    (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
-			iap = fec_mac_default;
-	} else {
-		*((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
-		*((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
+	/*
+	 * try to get mac address in following order:
+	 *
+	 * 1) module parameter via kernel command line in form
+	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
+	 */
+	iap = macaddr;
+
+	/*
+	 * 2) from flash or fuse (via platform data)
+	 */
+	if (!is_valid_ether_addr(iap)) {
+#ifdef CONFIG_M5272
+		if (FEC_FLASHMAC)
+			iap = (unsigned char *)FEC_FLASHMAC;
+#else
+		if (pdata)
+			memcpy(iap, pdata->mac, ETH_ALEN);
+#endif
+	}
+
+	/*
+	 * 3) FEC mac registers set by bootloader
+	 */
+	if (!is_valid_ether_addr(iap)) {
+		*((unsigned long *) &tmpaddr[0]) =
+			be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW));
+		*((unsigned short *) &tmpaddr[4]) =
+			be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
 		iap = &tmpaddr[0];
 	}
 
 	memcpy(dev->dev_addr, iap, ETH_ALEN);
 
-	/* Adjust MAC if using default MAC address */
-	if (iap == fec_mac_default)
-		dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
+	/* Adjust MAC if using macaddr */
+	if (iap == macaddr)
+		dev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
 }
-#endif
 
 /* ------------------------------------------------------------------------- */
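The new fec_get_mac() above documents a three-step MAC source order: module parameter first, then flash/fuse via platform data, then whatever the bootloader programmed into the address registers. A standalone sketch of that fallback chain, with made-up addresses and a simplified stand-in for the kernel's is_valid_ether_addr():

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* "valid" here means neither all-zero nor multicast; the real
 * is_valid_ether_addr() does the same checks on a 6-byte address */
static int addr_valid(const unsigned char *a)
{
	const unsigned char zero[ETH_ALEN] = { 0 };
	return memcmp(a, zero, ETH_ALEN) != 0 && !(a[0] & 1);
}

int main(void)
{
	unsigned char macaddr[ETH_ALEN] = { 0 };	/* 1) module param (unset) */
	unsigned char pdata_mac[ETH_ALEN] = { 0 };	/* 2) flash/fuse  (unset) */
	unsigned char bootloader[ETH_ALEN] =		/* 3) MAC registers */
		{ 0x00, 0x04, 0x9f, 0x01, 0x30, 0xe0 };
	unsigned char dev_addr[ETH_ALEN];
	const unsigned char *iap = macaddr;

	/* each step only applies if the previous one produced nothing usable */
	if (!addr_valid(iap))
		iap = pdata_mac;
	if (!addr_valid(iap))
		iap = bootloader;

	memcpy(dev_addr, iap, ETH_ALEN);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       dev_addr[0], dev_addr[1], dev_addr[2],
	       dev_addr[3], dev_addr[4], dev_addr[5]);
	return 0;
}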
@@ -651,8 +704,8 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
 	fep->mii_timeout = 0;
 	init_completion(&fep->mdio_done);
 
-	/* start a read op */
-	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
+	/* start a write op */
+	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
 		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
 		FEC_MMFR_TA | FEC_MMFR_DATA(value),
 		fep->hwp + FEC_MII_DATA);
@@ -681,6 +734,7 @@ static int fec_enet_mii_probe(struct net_device *dev)
 	char mdio_bus_id[MII_BUS_ID_SIZE];
 	char phy_name[MII_BUS_ID_SIZE + 3];
 	int phy_id;
+	int dev_id = fep->pdev->id;
 
 	fep->phy_dev = NULL;
 
@@ -692,6 +746,8 @@ static int fec_enet_mii_probe(struct net_device *dev)
 			continue;
 		if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
 			continue;
+		if (dev_id--)
+			continue;
 		strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
 		break;
 	}
@@ -729,10 +785,35 @@ static int fec_enet_mii_probe(struct net_device *dev)
 
 static int fec_enet_mii_init(struct platform_device *pdev)
 {
+	static struct mii_bus *fec0_mii_bus;
 	struct net_device *dev = platform_get_drvdata(pdev);
 	struct fec_enet_private *fep = netdev_priv(dev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
 	int err = -ENXIO, i;
 
+	/*
+	 * The dual fec interfaces are not equivalent with enet-mac.
+	 * Here are the differences:
+	 *
+	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
+	 *  - fec0 acts as the 1588 time master while fec1 is slave
+	 *  - external phys can only be configured by fec0
+	 *
+	 * That is to say fec1 can not work independently. It only works
+	 * when fec0 is working. The reason behind this design is that the
+	 * second interface is added primarily for Switch mode.
+	 *
+	 * Because of the last point above, both phys are attached on fec0
+	 * mdio interface in board design, and need to be configured by
+	 * fec0 mii_bus.
+	 */
+	if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id) {
+		/* fec1 uses fec0 mii_bus */
+		fep->mii_bus = fec0_mii_bus;
+		return 0;
+	}
+
 	fep->mii_timeout = 0;
 
 	/*
@@ -769,6 +850,10 @@ static int fec_enet_mii_init(struct platform_device *pdev)
 	if (mdiobus_register(fep->mii_bus))
 		goto err_out_free_mdio_irq;
 
+	/* save fec0 mii_bus */
+	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+		fec0_mii_bus = fep->mii_bus;
+
 	return 0;
 
 err_out_free_mdio_irq:
@@ -1067,9 +1152,8 @@ static const struct net_device_ops fec_netdev_ops = {
 
 /*
  * XXX:  We need to clean up on failure exits here.
  *
- * index is only used in legacy code
  */
-static int fec_enet_init(struct net_device *dev, int index)
+static int fec_enet_init(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
 	struct bufdesc *cbd_base;
@@ -1086,26 +1170,11 @@ static int fec_enet_init(struct net_device *dev)
 
 	spin_lock_init(&fep->hw_lock);
 
-	fep->index = index;
 	fep->hwp = (void __iomem *)dev->base_addr;
 	fep->netdev = dev;
 
-	/* Set the Ethernet address */
-#ifdef CONFIG_M5272
+	/* Get the Ethernet address */
 	fec_get_mac(dev);
-#else
-	{
-		unsigned long l;
-		l = readl(fep->hwp + FEC_ADDR_LOW);
-		dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
-		dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
-		dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
-		dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
-		l = readl(fep->hwp + FEC_ADDR_HIGH);
-		dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
-		dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
-	}
-#endif
 
 	/* Set receive and transmit descriptor base. */
 	fep->rx_bd_base = cbd_base;
@@ -1156,12 +1225,25 @@ static void
 fec_restart(struct net_device *dev, int duplex)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
 	int i;
+	u32 val, temp_mac[2];
 
 	/* Whack a reset.  We should wait for this. */
 	writel(1, fep->hwp + FEC_ECNTRL);
 	udelay(10);
 
+	/*
+	 * enet-mac reset will reset mac address registers too,
+	 * so need to reconfigure it.
+	 */
+	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+		memcpy(&temp_mac, dev->dev_addr, ETH_ALEN);
+		writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
+		writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+	}
+
 	/* Clear any outstanding interrupt. */
 	writel(0xffc00000, fep->hwp + FEC_IEVENT);
@@ -1208,20 +1290,45 @@ fec_restart(struct net_device *dev, int duplex)
 	/* Set MII speed */
 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
 
+	/*
+	 * The phy interface and speed need to get configured
+	 * differently on enet-mac.
+	 */
+	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+		val = readl(fep->hwp + FEC_R_CNTRL);
+
+		/* MII or RMII */
+		if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+			val |= (1 << 8);
+		else
+			val &= ~(1 << 8);
+
+		/* 10M or 100M */
+		if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
+			val &= ~(1 << 9);
+		else
+			val |= (1 << 9);
+
+		writel(val, fep->hwp + FEC_R_CNTRL);
+	} else {
 #ifdef FEC_MIIGSK_ENR
-	if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
-		/* disable the gasket and wait */
-		writel(0, fep->hwp + FEC_MIIGSK_ENR);
-		while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
-			udelay(1);
+		if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
+			/* disable the gasket and wait */
+			writel(0, fep->hwp + FEC_MIIGSK_ENR);
+			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
+				udelay(1);
 
-		/* configure the gasket: RMII, 50 MHz, no loopback, no echo */
-		writel(1, fep->hwp + FEC_MIIGSK_CFGR);
+			/*
+			 * configure the gasket:
+			 *   RMII, 50 MHz, no loopback, no echo
+			 */
+			writel(1, fep->hwp + FEC_MIIGSK_CFGR);
 
-		/* re-enable the gasket */
-		writel(2, fep->hwp + FEC_MIIGSK_ENR);
-	}
+			/* re-enable the gasket */
+			writel(2, fep->hwp + FEC_MIIGSK_ENR);
+		}
 #endif
+	}
 
 	/* And last, enable the transmit and receive processing */
 	writel(2, fep->hwp + FEC_ECNTRL);
@@ -1316,7 +1423,7 @@ fec_probe(struct platform_device *pdev)
 	}
 	clk_enable(fep->clk);
 
-	ret = fec_enet_init(ndev, 0);
+	ret = fec_enet_init(ndev);
 	if (ret)
 		goto failed_init;
 
@@ -1380,8 +1487,10 @@ fec_suspend(struct device *dev)
 
 	if (ndev) {
 		fep = netdev_priv(ndev);
-		if (netif_running(ndev))
-			fec_enet_close(ndev);
+		if (netif_running(ndev)) {
+			fec_stop(ndev);
+			netif_device_detach(ndev);
+		}
 		clk_disable(fep->clk);
 	}
 	return 0;
@@ -1396,8 +1505,10 @@ fec_resume(struct device *dev)
 	if (ndev) {
 		fep = netdev_priv(ndev);
 		clk_enable(fep->clk);
-		if (netif_running(ndev))
-			fec_enet_open(ndev);
+		if (netif_running(ndev)) {
+			fec_restart(ndev, fep->full_duplex);
+			netif_device_attach(ndev);
+		}
 	}
 	return 0;
 }
@@ -1414,12 +1525,13 @@ static const struct dev_pm_ops fec_pm_ops = {
 
 static struct platform_driver fec_driver = {
 	.driver	= {
-		.name	= "fec",
+		.name	= DRIVER_NAME,
 		.owner	= THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm	= &fec_pm_ops,
 #endif
 	},
+	.id_table = fec_devtype,
 	.probe	= fec_probe,
 	.remove	= __devexit_p(fec_drv_remove),
 };
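FEC_QUIRK_SWAP_FRAME above marks controllers (the imx28-fec entry) whose frames must be byte-swapped per 32-bit word on the way in and out. A standalone model of swap_buffer() with an explicit byte swap, so the output is the same on any host; the frame bytes are made up:

#include <stdio.h>
#include <stdint.h>

static uint32_t bswap32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00) |
	       ((x << 8) & 0x00ff0000) | (x << 24);
}

/* swap every 32-bit word in place; a partial trailing word is swapped
 * too, which is why the driver rounds up with (len + 3) / 4 */
static void *swap_buffer(void *bufaddr, int len)
{
	int i;
	uint32_t *buf = bufaddr;

	for (i = 0; i < (len + 3) / 4; i++, buf++)
		*buf = bswap32(*buf);

	return bufaddr;
}

int main(void)
{
	/* an 8-byte "frame"; the union keeps it 32-bit aligned */
	union { uint8_t b[8]; uint32_t w[2]; } frame = {
		.b = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 }
	};
	int i;

	swap_buffer(frame.b, sizeof(frame.b));
	for (i = 0; i < 8; i++)
		printf("%02x ", frame.b[i]);
	printf("\n");	/* prints: 44 33 22 11 88 77 66 55 */
	return 0;
}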

--- a/drivers/net/fec.h
+++ b/drivers/net/fec.h

@@ -14,7 +14,8 @@
 /****************************************************************************/
 
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
+    defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
+    defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
 /*
  * Just figures, Motorola would have to change the offsets for
  * registers in the same peripheral device on different models
@@ -78,7 +79,7 @@
 /*
  *	Define the buffer descriptor structure.
  */
-#ifdef CONFIG_ARCH_MXC
+#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
 struct bufdesc {
 	unsigned short cbd_datlen;	/* Data length */
 	unsigned short cbd_sc;	/* Control and status info */

--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c

@@ -3949,6 +3949,7 @@ static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
 		writel(flags, base + NvRegWakeUpFlags);
 		spin_unlock_irq(&np->lock);
 	}
+	device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
 	return 0;
 }
 
@@ -5488,14 +5489,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	/* set mac address */
 	nv_copy_mac_to_hw(dev);
 
-	/* Workaround current PCI init glitch:  wakeup bits aren't
-	 * being set from PCI PM capability.
-	 */
-	device_init_wakeup(&pci_dev->dev, 1);
-
 	/* disable WOL */
 	writel(0, base + NvRegWakeUpFlags);
 	np->wolenabled = 0;
+	device_set_wakeup_enable(&pci_dev->dev, false);
 
 	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
 
@@ -5746,8 +5743,9 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
 }
 
 #ifdef CONFIG_PM
-static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
+static int nv_suspend(struct device *device)
 {
+	struct pci_dev *pdev = to_pci_dev(device);
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
@@ -5763,25 +5761,17 @@ static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
 	for (i = 0; i <= np->register_size/sizeof(u32); i++)
 		np->saved_config_space[i] = readl(base + i*sizeof(u32));
 
-	pci_save_state(pdev);
-	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
-	pci_disable_device(pdev);
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 	return 0;
 }
 
-static int nv_resume(struct pci_dev *pdev)
+static int nv_resume(struct device *device)
 {
+	struct pci_dev *pdev = to_pci_dev(device);
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
 	int i, rc = 0;
 
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-
-	/* ack any pending wake events, disable PME */
-	pci_enable_wake(pdev, PCI_D0, 0);
-
 	/* restore non-pci configuration space */
 	for (i = 0; i <= np->register_size/sizeof(u32); i++)
 		writel(np->saved_config_space[i], base+i*sizeof(u32));
@@ -5800,6 +5790,9 @@ static int nv_resume(struct pci_dev *pdev)
 	return rc;
 }
 
+static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
+#define NV_PM_OPS (&nv_pm_ops)
+
 static void nv_shutdown(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
@@ -5822,15 +5815,13 @@ static void nv_shutdown(struct pci_dev *pdev)
 	 * only put the device into D3 if we really go for poweroff.
 	 */
 	if (system_state == SYSTEM_POWER_OFF) {
-		if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled))
-			pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
+		pci_wake_from_d3(pdev, np->wolenabled);
 		pci_set_power_state(pdev, PCI_D3hot);
 	}
 }
 #else
-#define nv_suspend NULL
+#define NV_PM_OPS NULL
 #define nv_shutdown NULL
-#define nv_resume NULL
 #endif /* CONFIG_PM */
 
 static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
@@ -6002,9 +5993,8 @@ static struct pci_driver driver = {
 	.id_table = pci_tbl,
 	.probe = nv_probe,
 	.remove = __devexit_p(nv_remove),
-	.suspend = nv_suspend,
-	.resume = nv_resume,
 	.shutdown = nv_shutdown,
+	.driver.pm = NV_PM_OPS,
 };
 
 static int __init init_nic(void)

--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c

@@ -396,7 +396,7 @@ static unsigned char *add_mcs(unsigned char *bits, int bitrate,
 	while (p) {
 		if (p->bitrate == bitrate) {
 			memcpy(p->bits, bits, YAM_FPGA_SIZE);
-			return p->bits;
+			goto out;
 		}
 		p = p->next;
 	}
@@ -411,7 +411,7 @@ static unsigned char *add_mcs(unsigned char *bits, int bitrate,
 	p->bitrate = bitrate;
 	p->next = yam_data;
 	yam_data = p;
-
+ out:
 	release_firmware(fw);
 	return p->bits;
 }
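The add_mcs() change above routes the early "bitrate already cached" return through a single out: label so release_firmware() always runs instead of leaking the firmware reference on that path. The same shape in a standalone sketch; all names and data here are invented:

#include <stdio.h>
#include <stdlib.h>

struct resource { int id; };

static struct resource *acquire(void)   { return malloc(sizeof(struct resource)); }
static void release(struct resource *r) { free(r); }

static int find_or_insert(int key, int *table, int n)
{
	struct resource *r = acquire();
	int i, ret = -1;

	if (!r)
		return -1;

	for (i = 0; i < n; i++) {
		if (table[i] == key) {
			ret = i;
			goto out;	/* early hit: a bare "return i;" would leak r */
		}
	}
	table[0] = key;
	ret = 0;
out:
	release(r);	/* single exit point, resource always released */
	return ret;
}

int main(void)
{
	int table[4] = { -1, -1, -1, -1 };
	printf("%d\n", find_or_insert(42, table, 4));
	return 0;
}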

--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h

@@ -508,6 +508,8 @@ extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
 extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
 extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
 extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+				   struct ixgbe_ring *);
 extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
 extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
@@ -524,26 +526,13 @@ extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
 extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
 extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
 extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-                                                 struct ixgbe_atr_input *input,
+                                                 union ixgbe_atr_hash_dword input,
+                                                 union ixgbe_atr_hash_dword common,
                                                  u8 queue);
 extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
-                                               struct ixgbe_atr_input *input,
+                                               union ixgbe_atr_input *input,
                                                struct ixgbe_atr_input_masks *input_masks,
                                                u16 soft_id, u8 queue);
-extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input,
-                                       u16 vlan_id);
-extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input,
-                                        u32 src_addr);
-extern s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input,
-                                        u32 dst_addr);
-extern s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input,
-                                        u16 src_port);
-extern s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input,
-                                        u16 dst_port);
-extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
-                                         u16 flex_byte);
-extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
-                                      u8 l4type);
 extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
                                    struct ixgbe_ring *ring);
 extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,

--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c

@@ -1003,7 +1003,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
 		udelay(10);
 	}
 	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
-		hw_dbg(hw ,"Flow Director previous command isn't complete, "
+		hw_dbg(hw, "Flow Director previous command isn't complete, "
 		       "aborting table re-initialization.\n");
 		return IXGBE_ERR_FDIR_REINIT_FAILED;
 	}
@@ -1113,13 +1113,10 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
 	/* Move the flexible bytes to use the ethertype - shift 6 words */
 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
 
-	fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
-
 	/* Prime the keys for hashing */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
-	                htonl(IXGBE_ATR_BUCKET_HASH_KEY));
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
-	                htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
 
 	/*
 	 * Poll init-done after we write the register.  Estimated times:
@@ -1209,10 +1206,8 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
 
 	/* Prime the keys for hashing */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
-	                htonl(IXGBE_ATR_BUCKET_HASH_KEY));
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
-	                htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
 
 	/*
 	 * Poll init-done after we write the register.  Estimated times:
@@ -1251,8 +1246,8 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
  * @stream: input bitstream to compute the hash on
  * @key: 32-bit hash key
  **/
-static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input,
-                                        u32 key)
+static u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
+					u32 key)
 {
 	/*
 	 * The algorithm is as follows:
@@ -1272,409 +1267,249 @@ static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input,
 	 * To simplify for programming, the algorithm is implemented
 	 * in software this way:
 	 *
-	 * Key[31:0], Stream[335:0]
+	 * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
 	 *
-	 * tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times
-	 * int_key[350:0] = tmp_key[351:1]
-	 * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
+	 * for (i = 0; i < 352; i+=32)
+	 *     hi_hash_dword[31:0] ^= Stream[(i+31):i];
 	 *
-	 * hash[15:0] = 0;
-	 * for (i = 0; i < 351; i++) {
-	 *     if (int_key[i])
-	 *         hash ^= int_stream[(i + 15):i];
+	 * lo_hash_dword[15:0]  ^= Stream[15:0];
+	 * lo_hash_dword[15:0]  ^= hi_hash_dword[31:16];
+	 * lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
+	 *
+	 * hi_hash_dword[31:0]  ^= Stream[351:320];
+	 *
+	 * if(key[0])
+	 *     hash[15:0] ^= Stream[15:0];
+	 *
+	 * for (i = 0; i < 16; i++) {
+	 *     if (key[i])
+	 *         hash[15:0] ^= lo_hash_dword[(i+15):i];
+	 *     if (key[i + 16])
+	 *         hash[15:0] ^= hi_hash_dword[(i+15):i];
 	 * }
+	 *
 	 */
+	__be32 common_hash_dword = 0;
+	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+	u32 hash_result = 0;
+	u8 i;
 
-	union {
-		u64 fill[6];
-		u32 key[11];
-		u8  key_stream[44];
-	} tmp_key;
+	/* record the flow_vm_vlan bits as they are a key part to the hash */
+	flow_vm_vlan = ntohl(atr_input->dword_stream[0]);
 
-	u8   *stream = (u8 *)atr_input;
-	u8   int_key[44];      /* upper-most bit unused */
-	u8   hash_str[46];     /* upper-most 2 bits unused */
-	u16  hash_result = 0;
-	int  i, j, k, h;
+	/* generate common hash dword */
+	for (i = 10; i; i -= 2)
+		common_hash_dword ^= atr_input->dword_stream[i] ^
+				     atr_input->dword_stream[i - 1];
 
+	hi_hash_dword = ntohl(common_hash_dword);
+
+	/* low dword is word swapped version of common */
+	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+	/* apply flow ID/VM pool/VLAN ID bits to hash words */
+	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+	/* Process bits 0 and 16 */
+	if (key & 0x0001) hash_result ^= lo_hash_dword;
+	if (key & 0x00010000) hash_result ^= hi_hash_dword;
+
 	/*
-	 * Initialize the fill member to prevent warnings
-	 * on some compilers
+	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+	 * delay this because bit 0 of the stream should not be processed
+	 * so we do not add the vlan until after bit 0 was processed
 	 */
-	tmp_key.fill[0] = 0;
+	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
 
-	/* First load the temporary key stream */
-	for (i = 0; i < 6; i++) {
-		u64 fillkey = ((u64)key << 32) | key;
-		tmp_key.fill[i] = fillkey;
+	/* process the remaining 30 bits in the key 2 bits at a time */
+	for (i = 15; i; i-- ) {
+		if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
+		if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
 	}
 
-	/*
-	 * Set the interim key for the hashing.  Bit 352 is unused, so we must
-	 * shift and compensate when building the key.
-	 */
-	int_key[0] = tmp_key.key_stream[0] >> 1;
-	for (i = 1, j = 0; i < 44; i++) {
-		unsigned int this_key = tmp_key.key_stream[j] << 7;
-		j++;
-		int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1));
-	}
-
-	/*
-	 * Set the interim bit string for the hashing.  Bits 368 and 367 are
-	 * unused, so shift and compensate when building the string.
-	 */
-	hash_str[0] = (stream[40] & 0x7f) >> 1;
-	for (i = 1, j = 40; i < 46; i++) {
-		unsigned int this_str = stream[j] << 7;
-		j++;
-		if (j > 41)
-			j = 0;
-		hash_str[i] = (u8)(this_str | (stream[j] >> 1));
-	}
-
-	/*
-	 * Now compute the hash.  i is the index into hash_str, j is into our
-	 * key stream, k is counting the number of bits, and h interates within
-	 * each byte.
-	 */
-	for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
-		for (h = 0; h < 8 && k < 351; h++, k++) {
-			if (int_key[j] & (1 << h)) {
-				/*
-				 * Key bit is set, XOR in the current 16-bit
-				 * string.  Example of processing:
-				 *    h = 0,
-				 *      tmp = (hash_str[i - 2] & 0 << 16) |
-				 *            (hash_str[i - 1] & 0xff << 8) |
-				 *            (hash_str[i] & 0xff >> 0)
-				 *      So tmp = hash_str[15 + k:k], since the
-				 *      i + 2 clause rolls off the 16-bit value
-				 *    h = 7,
-				 *      tmp = (hash_str[i - 2] & 0x7f << 9) |
-				 *            (hash_str[i - 1] & 0xff << 1) |
-				 *            (hash_str[i] & 0x80 >> 7)
-				 */
-				int tmp = (hash_str[i] >> h);
-				tmp |= (hash_str[i - 1] << (8 - h));
-				tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
-				       << (16 - h);
-				hash_result ^= (u16)tmp;
-			}
-		}
-	}
-
-	return hash_result;
+	return hash_result & IXGBE_ATR_HASH_MASK;
 }
 
-/**
- * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
- * @input: input stream to modify
- * @vlan: the VLAN id to load
- **/
-s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
-{
-	input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
-	input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
-
-	return 0;
-}
-
-/**
- * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
- * @input: input stream to modify
- * @src_addr: the IP address to load
- **/
-s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
-{
-	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
-	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
-	    (src_addr >> 16) & 0xff;
-	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
-	    (src_addr >> 8) & 0xff;
-	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
-
-	return 0;
-}
-
-/**
- * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
- * @input: input stream to modify
- * @dst_addr: the IP address to load
- **/
-s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
-{
-	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
-	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
-	    (dst_addr >> 16) & 0xff;
-	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
-	    (dst_addr >> 8) & 0xff;
-	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
-
-	return 0;
-}
-
-/**
- * ixgbe_atr_set_src_port_82599 - Sets the source port
- * @input: input stream to modify
- * @src_port: the source port to load
- **/
-s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
-{
-	input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
-	input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
-
-	return 0;
-}
-
-/**
- * ixgbe_atr_set_dst_port_82599 - Sets the destination port
- * @input: input stream to modify
- * @dst_port: the destination port to load
- **/
-s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
-{
-	input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
-	input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
-
-	return 0;
-}
-
-/**
- * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
- * @input: input stream to modify
- * @flex_bytes: the flexible bytes to load
- **/
-s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
-{
-	input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
-	input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
-
-	return 0;
-}
-
-/**
- * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
- * @input: input stream to modify
- * @l4type: the layer 4 type value to load
- **/
-s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
-{
-	input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
-
-	return 0;
-}
-
-/**
- * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
- * @input: input stream to search
- * @vlan: the VLAN id to load
- **/
-static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
-{
-	*vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
-	*vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
-
-	return 0;
-}
-
-/**
- * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
- * @input: input stream to search
- * @src_addr: the IP address to load
- **/
-static s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input,
-                                        u32 *src_addr)
-{
-	*src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
-	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
-	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
-	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
-
-	return 0;
-}
-
-/**
- * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address
- * @input: input stream to search
- * @dst_addr: the IP address to load
- **/
-static s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input,
-                                        u32 *dst_addr)
-{
-	*dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET];
-	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8;
-	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16;
-	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24;
-
-	return 0;
-}
-
-/**
- * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address
- * @input: input stream to search
- * @src_addr_1: the first 4 bytes of the IP address to load
- * @src_addr_2: the second 4 bytes of the IP address to load
- * @src_addr_3: the third 4 bytes of the IP address to load
- * @src_addr_4: the fourth 4 bytes of the IP address to load
- **/
-static s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
-                                        u32 *src_addr_1, u32 *src_addr_2,
-                                        u32 *src_addr_3, u32 *src_addr_4)
-{
-	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12];
-	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8;
-	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16;
-	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24;
-
-	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8];
-	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8;
-	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16;
-	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24;
-
-	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4];
-	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
-	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
-	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
-
-	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
-	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
-	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
-	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
-
-	return 0;
-}
-
-/**
- * ixgbe_atr_get_src_port_82599 - Gets the source port
- * @input: input stream to modify
- * @src_port: the source port to load
- *
- * Even though the input is given in big-endian, the FDIRPORT registers
- * expect the ports to be programmed in little-endian.  Hence the need to swap
- * endianness when retrieving the data.  This can be confusing since the
- * internal hash engine expects it to be big-endian.
- **/
-static s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input,
-                                        u16 *src_port)
-{
-	*src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8;
-	*src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1];
-
-	return 0;
-}
-
-/**
- * ixgbe_atr_get_dst_port_82599 - Gets the destination port
- * @input: input stream to modify
- * @dst_port: the destination port to load
- *
- * Even though the input is given in big-endian, the FDIRPORT registers
- * expect the ports to be programmed in little-endian.  Hence the need to swap
- * endianness when retrieving the data.  This can be confusing since the
- * internal hash engine expects it to be big-endian.
- **/
-static s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input,
-                                        u16 *dst_port)
-{
-	*dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
-	*dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
-
-	return 0;
-}
-
-/**
- * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes
- * @input: input stream to modify
- * @flex_bytes: the flexible bytes to load
- **/
-static s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input,
-                                         u16 *flex_byte)
-{
-	*flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
-	*flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
-
-	return 0;
-}
-
-/**
- * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
- * @input: input stream to modify
- * @l4type: the layer 4 type value to load
- **/
-static s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input,
-                                      u8 *l4type)
-{
-	*l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET];
-
-	return 0;
-}
+/*
+ * These defines allow us to quickly generate all of the necessary instructions
+ * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
+ * for values 0 through 15
+ */
+#define IXGBE_ATR_COMMON_HASH_KEY \
+		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
+#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
+do { \
+	u32 n = (_n); \
+	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+		common_hash ^= lo_hash_dword >> n; \
+	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+		bucket_hash ^= lo_hash_dword >> n; \
+	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+		sig_hash ^= lo_hash_dword << (16 - n); \
+	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+		common_hash ^= hi_hash_dword >> n; \
+	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+		bucket_hash ^= hi_hash_dword >> n; \
+	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+		sig_hash ^= hi_hash_dword << (16 - n); \
+} while (0);
+
+/**
+ * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
+ * @stream: input bitstream to compute the hash on
+ *
+ * This function is almost identical to the function above but contains
+ * several optomizations such as unwinding all of the loops, letting the
+ * compiler work out all of the conditional ifs since the keys are static
+ * defines, and computing two keys at once since the hashed dword stream
+ * will be the same for both keys.
+ **/
+static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+					    union ixgbe_atr_hash_dword common)
+{
+	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
+
+	/* record the flow_vm_vlan bits as they are a key part to the hash */
+	flow_vm_vlan = ntohl(input.dword);
+
+	/* generate common hash dword */
+	hi_hash_dword = ntohl(common.dword);
+
+	/* low dword is word swapped version of common */
+	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+	/* apply flow ID/VM pool/VLAN ID bits to hash words */
+	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+	/* Process bits 0 and 16 */
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
+
+	/*
+	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+	 * delay this because bit 0 of the stream should not be processed
+	 * so we do not add the vlan until after bit 0 was processed
+	 */
+	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+	/* Process remaining 30 bit of the key */
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
+
+	/* combine common_hash result with signature and bucket hashes */
+	bucket_hash ^= common_hash;
+	bucket_hash &= IXGBE_ATR_HASH_MASK;
+
+	sig_hash ^= common_hash << 16;
+	sig_hash &= IXGBE_ATR_HASH_MASK << 16;
+
+	/* return completed signature hash */
+	return sig_hash ^ bucket_hash;
+}
 
 /**
  * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
  * @hw: pointer to hardware structure
- * @stream: input bitstream
+ * @input: unique input dword
+ * @common: compressed common input dword
  * @queue: queue index to direct traffic to
  **/
 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-                                          struct ixgbe_atr_input *input,
+                                          union ixgbe_atr_hash_dword input,
+                                          union ixgbe_atr_hash_dword common,
                                           u8 queue)
 {
 	u64 fdirhashcmd;
-	u64 fdircmd;
-	u32 fdirhash;
-	u16 bucket_hash, sig_hash;
-	u8  l4type;
+	u32 fdircmd;
 
-	bucket_hash = ixgbe_atr_compute_hash_82599(input,
-	                                           IXGBE_ATR_BUCKET_HASH_KEY);
+	/*
+	 * Get the flow_type in order to program FDIRCMD properly
+	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
+	 */
+	switch (input.formatted.flow_type) {
+	case IXGBE_ATR_FLOW_TYPE_TCPV4:
+	case IXGBE_ATR_FLOW_TYPE_UDPV4:
+	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+	case IXGBE_ATR_FLOW_TYPE_TCPV6:
+	case IXGBE_ATR_FLOW_TYPE_UDPV6:
+	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
+		break;
+	default:
+		hw_dbg(hw, " Error on flow type input\n");
+		return IXGBE_ERR_CONFIG;
+	}
 
-	/* bucket_hash is only 15 bits */
-	bucket_hash &= IXGBE_ATR_HASH_MASK;
+	/* configure FDIRCMD register */
+	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+	          IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+	fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
 
-	sig_hash = ixgbe_atr_compute_hash_82599(input,
-	                                        IXGBE_ATR_SIGNATURE_HASH_KEY);
-
-	/* Get the l4type in order to program FDIRCMD properly */
-	/* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
-	ixgbe_atr_get_l4type_82599(input, &l4type);
-
 	/*
 	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
 	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
 	 */
-	fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
-
-	fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
-	           IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN);
-
-	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
-	case IXGBE_ATR_L4TYPE_TCP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
-		break;
-	case IXGBE_ATR_L4TYPE_UDP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
-		break;
-	case IXGBE_ATR_L4TYPE_SCTP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
-		break;
-	default:
-		hw_dbg(hw, "Error on l4type input\n");
-		return IXGBE_ERR_CONFIG;
-	}
-
-	if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK)
-		fdircmd |= IXGBE_FDIRCMD_IPV6;
-
-	fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
-	fdirhashcmd = ((fdircmd << 32) | fdirhash);
+	fdirhashcmd = (u64)fdircmd << 32;
+	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
 
 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
+	hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
 
 	return 0;
 }
 
+/**
+ * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
+ * @input_mask: mask to be bit swapped
+ *
+ * The source and destination port masks for flow director are bit swapped
+ * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc.  In order to
+ * generate a correctly swapped value we need to bit swap the mask and that
+ * is what is accomplished by this function.
+ **/
+static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
+{
+	u32 mask = ntohs(input_masks->dst_port_mask);
+	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
+	mask |= ntohs(input_masks->src_port_mask);
+	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
+
+/*
+ * These two macros are meant to address the fact that we have registers
+ * that are either all or in part big-endian.  As a result on big-endian
+ * systems we will end up byte swapping the value to little-endian before
+ * it is byte swapped again and written to the hardware in the original
+ * big-endian format.
+ */
+#define IXGBE_STORE_AS_BE32(_value) \
+	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
+	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
+
+#define IXGBE_WRITE_REG_BE32(a, reg, value) \
+	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))
+
+#define IXGBE_STORE_AS_BE16(_value) \
+	(((u16)(_value) >> 8) | ((u16)(_value) << 8))
+
 /**
  * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
  * @hw: pointer to hardware structure
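The rewritten comment in ixgbe_atr_compute_hash_82599() above gives the folded form of the hash: XOR the stream dwords into one common dword, derive a word-swapped low copy, then XOR in shifted copies selected by the key bits. A standalone model of exactly that scalar loop; it works in host byte order only (the driver's ntohl() steps are skipped) and the key and stream values are arbitrary:

#include <stdio.h>
#include <stdint.h>

#define ATR_HASH_MASK 0x7fff	/* the hash is only 15 bits wide */

static uint16_t atr_hash(const uint32_t stream[11], uint32_t flow_vm_vlan,
			 uint32_t key)
{
	uint32_t hi = 0, lo, hash = 0;
	int i;

	/* fold dwords 1..10 into a single common dword */
	for (i = 1; i <= 10; i++)
		hi ^= stream[i];

	/* low dword is the word-swapped version of the common dword */
	lo = (hi >> 16) | (hi << 16);

	/* mix the flow ID/VM pool/VLAN ID dword into the high word */
	hi ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* bits 0 and 16 of the key select the unshifted words */
	if (key & 0x0001)
		hash ^= lo;
	if (key & 0x00010000)
		hash ^= hi;

	/* the VLAN dword must not contribute to bit 0, so add it late */
	lo ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* remaining 30 key bits, two per iteration */
	for (i = 1; i < 16; i++) {
		if (key & (0x0001u << i))
			hash ^= lo >> i;
		if (key & (0x00010000u << i))
			hash ^= hi >> i;
	}

	return (uint16_t)(hash & ATR_HASH_MASK);
}

int main(void)
{
	uint32_t stream[11] = { 0x12345678, 0x9abcdef0, 0x0f0f0f0f };

	/* dword 0 carries the flow ID/VM pool/VLAN bits, as in the driver */
	printf("hash = 0x%04x\n", atr_hash(stream, stream[0], 0x12345678));
	return 0;
}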
@@ -1687,135 +1522,139 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
  * hardware writes must be protected from one another.
  **/
 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
-                                        struct ixgbe_atr_input *input,
+                                        union ixgbe_atr_input *input,
                                         struct ixgbe_atr_input_masks *input_masks,
                                         u16 soft_id, u8 queue)
 {
-	u32 fdircmd = 0;
 	u32 fdirhash;
-	u32 src_ipv4 = 0, dst_ipv4 = 0;
-	u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
-	u16 src_port, dst_port, vlan_id, flex_bytes;
-	u16 bucket_hash;
-	u8  l4type;
-	u8  fdirm = 0;
-
-	/* Get our input values */
-	ixgbe_atr_get_l4type_82599(input, &l4type);
+	u32 fdircmd;
+	u32 fdirport, fdirtcpm;
+	u32 fdirvlan;
+	/* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
+	u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
+		    IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
 
 	/*
-	 * Check l4type formatting, and bail out before we touch the hardware
+	 * Check flow_type formatting, and bail out before we touch the hardware
 	 * if there's a configuration issue
 	 */
-	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
-	case IXGBE_ATR_L4TYPE_TCP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
-		break;
-	case IXGBE_ATR_L4TYPE_UDP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
-		break;
-	case IXGBE_ATR_L4TYPE_SCTP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
+	switch (input->formatted.flow_type) {
+	case IXGBE_ATR_FLOW_TYPE_IPV4:
+		/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
+		fdirm |= IXGBE_FDIRM_L4P;
+	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+		if (input_masks->dst_port_mask || input_masks->src_port_mask) {
+			hw_dbg(hw, " Error on src/dst port mask\n");
+			return IXGBE_ERR_CONFIG;
+		}
+	case IXGBE_ATR_FLOW_TYPE_TCPV4:
+	case IXGBE_ATR_FLOW_TYPE_UDPV4:
 		break;
 	default:
-		hw_dbg(hw, "Error on l4type input\n");
+		hw_dbg(hw, " Error on flow type input\n");
 		return IXGBE_ERR_CONFIG;
 	}
 
-	bucket_hash = ixgbe_atr_compute_hash_82599(input,
-	                                           IXGBE_ATR_BUCKET_HASH_KEY);
-
-	/* bucket_hash is only 15 bits */
-	bucket_hash &= IXGBE_ATR_HASH_MASK;
-
-	ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
-	ixgbe_atr_get_src_port_82599(input, &src_port);
-	ixgbe_atr_get_dst_port_82599(input, &dst_port);
-	ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
-
-	fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
-
-	/* Now figure out if we're IPv4 or IPv6 */
-	if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
-		/* IPv6 */
-		ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
-		                             &src_ipv6_3, &src_ipv6_4);
-
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
-		/* The last 4 bytes is the same register as IPv4 */
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
-		fdircmd |= IXGBE_FDIRCMD_IPV6;
-		fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
-	} else {
-		/* IPv4 */
-		ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
-	}
-
-	ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
-
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
-	                (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
-	              (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
-
 	/*
-	 * Program the relevant mask registers.  L4type cannot be
-	 * masked out in this implementation.
+	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
+	 * are zero, then assume a full mask for that field.  Also assume that
+	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
+	 * cannot be masked out in this implementation.
 	 *
 	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
 	 * point in time.
 	 */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
-
-	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
-	case IXGBE_ATR_L4TYPE_TCP:
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, input_masks->src_port_mask);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
-		                (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
-		                 (input_masks->dst_port_mask << 16)));
+
+	/* Program FDIRM */
+	switch (ntohs(input_masks->vlan_id_mask) & 0xEFFF) {
+	case 0xEFFF:
+		/* Unmask VLAN ID - bit 0 and fall through to unmask prio */
+		fdirm &= ~IXGBE_FDIRM_VLANID;
+	case 0xE000:
+		/* Unmask VLAN prio - bit 1 */
+		fdirm &= ~IXGBE_FDIRM_VLANP;
 		break;
-	case IXGBE_ATR_L4TYPE_UDP:
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, input_masks->src_port_mask);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
-		                (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
-		                 (input_masks->src_port_mask << 16)));
+	case 0x0FFF:
+		/* Unmask VLAN ID - bit 0 */
+		fdirm &= ~IXGBE_FDIRM_VLANID;
+		break;
+	case 0x0000:
+		/* do nothing, vlans already masked */
 		break;
 	default:
-		/* this already would have failed above */
-		break;
+		hw_dbg(hw, " Error on VLAN mask\n");
+		return IXGBE_ERR_CONFIG;
 	}
 
-	/* Program the last mask register, FDIRM */
-	if (input_masks->vlan_id_mask)
-		/* Mask both VLAN and VLANP - bits 0 and 1 */
-		fdirm |= 0x3;
+	if (input_masks->flex_mask & 0xFFFF) {
+		if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) {
+			hw_dbg(hw, " Error on flexible byte mask\n");
+			return IXGBE_ERR_CONFIG;
+		}
+		/* Unmask Flex Bytes - bit 4 */
+		fdirm &= ~IXGBE_FDIRM_FLEX;
+	}
 
-	if (input_masks->data_mask)
-		/* Flex bytes need masking, so mask the whole thing - bit 4 */
-		fdirm |= 0x10;
-
 	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
-	fdirm |= 0x24;
-
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
-	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
-	fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
-	fdircmd |= IXGBE_FDIRCMD_LAST;
-	fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
-	fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+	/* store the TCP/UDP port masks, bit reversed from port layout */
+	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks);
+
+	/* write both the same so that UDP and TCP use the same mask */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+
+	/* store source and destination IP masks (big-enian) */
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
+			     ~input_masks->src_ip_mask[0]);
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
+			     ~input_masks->dst_ip_mask[0]);
+
+	/* Apply masks to input data */
+	input->formatted.vlan_id &= input_masks->vlan_id_mask;
+	input->formatted.flex_bytes &= input_masks->flex_mask;
+	input->formatted.src_port &= input_masks->src_port_mask;
+	input->formatted.dst_port &= input_masks->dst_port_mask;
+	input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
+	input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];
+
+	/* record vlan (little-endian) and flex_bytes(big-endian) */
+	fdirvlan =
+		IXGBE_STORE_AS_BE16(ntohs(input->formatted.flex_bytes));
+	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+	fdirvlan |= ntohs(input->formatted.vlan_id);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+
+	/* record source and destination port (little-endian)*/
+	fdirport = ntohs(input->formatted.dst_port);
+	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+	fdirport |= ntohs(input->formatted.src_port);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+
+	/* record the first 32 bits of the destination address (big-endian) */
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
+
+	/* record the source address (big-endian) */
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+
+	/* configure FDIRCMD register */
+	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+	          IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+
+	/* we only want the bucket hash so drop the upper 16 bits */
+	fdirhash = ixgbe_atr_compute_hash_82599(input,
+						IXGBE_ATR_BUCKET_HASH_KEY);
+	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
 
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
 
 	return 0;
 }
 
 /**
  * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
  * @hw: pointer to hardware structure
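ixgbe_get_fdirtcpm_82599() above relies on the classic shift-and-mask bit reversal: swapping 1-, 2-, 4- and then 8-bit groups reverses the bit order within each 16-bit half of the mask word, which is the layout the FDIRTCPM register expects. A standalone check with arbitrary port masks:

#include <stdio.h>
#include <stdint.h>

/* reverse the bit order of each 16-bit half of a 32-bit word:
 * swap adjacent bits, then 2-bit pairs, then nibbles (each byte is now
 * bit-reversed), then the two bytes of each half */
static uint32_t bit_reverse_halves(uint32_t mask)
{
	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
}

int main(void)
{
	/* dst port mask in the high half, src port mask in the low half,
	 * mirroring the DPORTM shift in the driver */
	uint32_t dst_mask = 0xffff, src_mask = 0x00ff;
	uint32_t fdirtcpm = bit_reverse_halves((dst_mask << 16) | src_mask);

	/* 0x00ff reversed within 16 bits becomes 0xff00 */
	printf("fdirtcpm = 0x%08x\n", fdirtcpm);	/* prints 0xffffff00 */
	return 0;
}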


@ -1477,9 +1477,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
reg_ctl &= ~IXGBE_RXCTRL_RXEN; reg_ctl &= ~IXGBE_RXCTRL_RXEN;
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx)); ixgbe_disable_rx_queue(adapter, rx_ring);
reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx), reg_ctl);
/* now Tx */ /* now Tx */
reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx)); reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
@ -2279,10 +2277,11 @@ static int ixgbe_set_rx_ntuple(struct net_device *dev,
struct ethtool_rx_ntuple *cmd) struct ethtool_rx_ntuple *cmd)
{ {
struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ethtool_rx_ntuple_flow_spec fs = cmd->fs; struct ethtool_rx_ntuple_flow_spec *fs = &cmd->fs;
struct ixgbe_atr_input input_struct; union ixgbe_atr_input input_struct;
struct ixgbe_atr_input_masks input_masks; struct ixgbe_atr_input_masks input_masks;
int target_queue; int target_queue;
int err;
if (adapter->hw.mac.type == ixgbe_mac_82598EB) if (adapter->hw.mac.type == ixgbe_mac_82598EB)
return -EOPNOTSUPP; return -EOPNOTSUPP;
@ -2291,67 +2290,122 @@ static int ixgbe_set_rx_ntuple(struct net_device *dev,
* Don't allow programming if the action is a queue greater than * Don't allow programming if the action is a queue greater than
* the number of online Tx queues. * the number of online Tx queues.
*/ */
if ((fs.action >= adapter->num_tx_queues) || if ((fs->action >= adapter->num_tx_queues) ||
(fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP)) (fs->action < ETHTOOL_RXNTUPLE_ACTION_DROP))
return -EINVAL; return -EINVAL;
memset(&input_struct, 0, sizeof(struct ixgbe_atr_input)); memset(&input_struct, 0, sizeof(union ixgbe_atr_input));
memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks)); memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src; /* record flow type */
input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst; switch (fs->flow_type) {
input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc; case IPV4_FLOW:
input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst; input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
input_masks.vlan_id_mask = fs.vlan_tag_mask; break;
/* only use the lowest 2 bytes for flex bytes */
input_masks.data_mask = (fs.data_mask & 0xffff);
switch (fs.flow_type) {
case TCP_V4_FLOW: case TCP_V4_FLOW:
ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP); input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
break; break;
case UDP_V4_FLOW: case UDP_V4_FLOW:
ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP); input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
break; break;
case SCTP_V4_FLOW: case SCTP_V4_FLOW:
ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP); input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
break; break;
default: default:
return -1; return -1;
} }
/* Mask bits from the inputs based on user-supplied mask */ /* copy vlan tag minus the CFI bit */
ixgbe_atr_set_src_ipv4_82599(&input_struct, if ((fs->vlan_tag & 0xEFFF) || (~fs->vlan_tag_mask & 0xEFFF)) {
(fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src)); input_struct.formatted.vlan_id = htons(fs->vlan_tag & 0xEFFF);
ixgbe_atr_set_dst_ipv4_82599(&input_struct, if (!fs->vlan_tag_mask) {
(fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst)); input_masks.vlan_id_mask = htons(0xEFFF);
/* 82599 expects these to be byte-swapped for perfect filtering */ } else {
ixgbe_atr_set_src_port_82599(&input_struct, switch (~fs->vlan_tag_mask & 0xEFFF) {
((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc)); /* all of these are valid vlan-mask values */
ixgbe_atr_set_dst_port_82599(&input_struct, case 0xEFFF:
((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst)); case 0xE000:
case 0x0FFF:
case 0x0000:
input_masks.vlan_id_mask =
htons(~fs->vlan_tag_mask);
break;
/* exit with error if vlan-mask is invalid */
default:
e_err(drv, "Partial VLAN ID or "
"priority mask in vlan-mask is not "
"supported by hardware\n");
return -1;
}
}
}
/* VLAN and Flex bytes are either completely masked or not */ /* make sure we only use the first 2 bytes of user data */
if (!fs.vlan_tag_mask) if ((fs->data & 0xFFFF) || (~fs->data_mask & 0xFFFF)) {
ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag); input_struct.formatted.flex_bytes = htons(fs->data & 0xFFFF);
if (!(fs->data_mask & 0xFFFF)) {
input_masks.flex_mask = 0xFFFF;
} else if (~fs->data_mask & 0xFFFF) {
e_err(drv, "Partial user-def-mask is not "
"supported by hardware\n");
return -1;
}
}
if (!input_masks.data_mask) /*
/* make sure we only use the first 2 bytes of user data */ * Copy input into formatted structures
ixgbe_atr_set_flex_byte_82599(&input_struct, *
(fs.data & 0xffff)); * These assignments are based on the following logic
* If neither input nor mask is set, assume the value is masked out.
* If input is set but mask is not, the mask should default to accept all.
* If input is not set but mask is, the mask likely results in 0.
* If both input and mask are set, assign both.
*/
if (fs->h_u.tcp_ip4_spec.ip4src || ~fs->m_u.tcp_ip4_spec.ip4src) {
input_struct.formatted.src_ip[0] = fs->h_u.tcp_ip4_spec.ip4src;
if (!fs->m_u.tcp_ip4_spec.ip4src)
input_masks.src_ip_mask[0] = 0xFFFFFFFF;
else
input_masks.src_ip_mask[0] =
~fs->m_u.tcp_ip4_spec.ip4src;
}
if (fs->h_u.tcp_ip4_spec.ip4dst || ~fs->m_u.tcp_ip4_spec.ip4dst) {
input_struct.formatted.dst_ip[0] = fs->h_u.tcp_ip4_spec.ip4dst;
if (!fs->m_u.tcp_ip4_spec.ip4dst)
input_masks.dst_ip_mask[0] = 0xFFFFFFFF;
else
input_masks.dst_ip_mask[0] =
~fs->m_u.tcp_ip4_spec.ip4dst;
}
if (fs->h_u.tcp_ip4_spec.psrc || ~fs->m_u.tcp_ip4_spec.psrc) {
input_struct.formatted.src_port = fs->h_u.tcp_ip4_spec.psrc;
if (!fs->m_u.tcp_ip4_spec.psrc)
input_masks.src_port_mask = 0xFFFF;
else
input_masks.src_port_mask = ~fs->m_u.tcp_ip4_spec.psrc;
}
if (fs->h_u.tcp_ip4_spec.pdst || ~fs->m_u.tcp_ip4_spec.pdst) {
input_struct.formatted.dst_port = fs->h_u.tcp_ip4_spec.pdst;
if (!fs->m_u.tcp_ip4_spec.pdst)
input_masks.dst_port_mask = 0xFFFF;
else
input_masks.dst_port_mask = ~fs->m_u.tcp_ip4_spec.pdst;
}
/* determine if we need to drop or route the packet */ /* determine if we need to drop or route the packet */
if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) if (fs->action == ETHTOOL_RXNTUPLE_ACTION_DROP)
target_queue = MAX_RX_QUEUES - 1; target_queue = MAX_RX_QUEUES - 1;
else else
target_queue = fs.action; target_queue = fs->action;
spin_lock(&adapter->fdir_perfect_lock); spin_lock(&adapter->fdir_perfect_lock);
ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct, err = ixgbe_fdir_add_perfect_filter_82599(&adapter->hw,
&input_masks, 0, target_queue); &input_struct,
&input_masks, 0,
target_queue);
spin_unlock(&adapter->fdir_perfect_lock); spin_unlock(&adapter->fdir_perfect_lock);
return 0; return err ? -1 : 0;
} }
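The value/mask policy spelled out in the comment block above repeats once per field; a hypothetical helper (not in the driver) states the rule for one 16-bit field. Note the inversion: ethtool masks use 1 = ignore while the hardware masks use 1 = compare.

static void ntuple_copy16(u16 value, u16 user_mask, u16 *field, u16 *hw_mask)
{
	/* neither value nor mask set: leave the field fully masked out */
	if (!value && !(u16)~user_mask)
		return;
	*field = value;
	/* no user mask means match exactly; otherwise invert the sense */
	*hw_mask = user_mask ? (u16)~user_mask : 0xFFFF;
}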
static const struct ethtool_ops ixgbe_ethtool_ops = { static const struct ethtool_ops ixgbe_ethtool_ops = {


@ -3024,6 +3024,36 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
} }
} }
void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
int wait_loop = IXGBE_MAX_RX_DESC_POLL;
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
rxdctl &= ~IXGBE_RXDCTL_ENABLE;
/* write value back with RXDCTL.ENABLE bit cleared */
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
if (hw->mac.type == ixgbe_mac_82598EB &&
!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
return;
/* the hardware may take up to 100us to really disable the rx queue */
do {
udelay(10);
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
if (!wait_loop) {
e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
"the polling period\n", reg_idx);
}
}
void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring) struct ixgbe_ring *ring)
{ {
@ -3034,9 +3064,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
/* disable queue to avoid issues while updating state */ /* disable queue to avoid issues while updating state */
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), ixgbe_disable_rx_queue(adapter, ring);
rxdctl & ~IXGBE_RXDCTL_ENABLE);
IXGBE_WRITE_FLUSH(hw);
IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
@ -4064,7 +4092,11 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
IXGBE_WRITE_FLUSH(hw); /* disable all enabled rx queues */
for (i = 0; i < adapter->num_rx_queues; i++)
/* this call also flushes the previous write */
ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
msleep(10); msleep(10);
netif_tx_stop_all_queues(netdev); netif_tx_stop_all_queues(netdev);
@ -4789,6 +4821,12 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
if (adapter->flags & (IXGBE_FLAG_FDIR_HASH_CAPABLE |
IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
e_err(probe,
"Flow Director is not supported while multiple "
"queues are disabled. Disabling Flow Director\n");
}
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
adapter->atr_sample_rate = 0; adapter->atr_sample_rate = 0;
@ -5094,16 +5132,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
if (dev->features & NETIF_F_NTUPLE) { /* n-tuple support exists, always init our spinlock */
/* Flow Director perfect filter enabled */ spin_lock_init(&adapter->fdir_perfect_lock);
adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; /* Flow Director hash filters enabled */
adapter->atr_sample_rate = 0; adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
spin_lock_init(&adapter->fdir_perfect_lock); adapter->atr_sample_rate = 20;
} else {
/* Flow Director hash filters enabled */
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->atr_sample_rate = 20;
}
adapter->ring_feature[RING_F_FDIR].indices = adapter->ring_feature[RING_F_FDIR].indices =
IXGBE_MAX_FDIR_INDICES; IXGBE_MAX_FDIR_INDICES;
adapter->fdir_pballoc = 0; adapter->fdir_pballoc = 0;
@ -6474,38 +6507,92 @@ static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
writel(i, tx_ring->tail); writel(i, tx_ring->tail);
} }
static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
u8 queue, u32 tx_flags, __be16 protocol) u32 tx_flags, __be16 protocol)
{ {
struct ixgbe_atr_input atr_input; struct ixgbe_q_vector *q_vector = ring->q_vector;
struct iphdr *iph = ip_hdr(skb); union ixgbe_atr_hash_dword input = { .dword = 0 };
struct ethhdr *eth = (struct ethhdr *)skb->data; union ixgbe_atr_hash_dword common = { .dword = 0 };
union {
unsigned char *network;
struct iphdr *ipv4;
struct ipv6hdr *ipv6;
} hdr;
struct tcphdr *th; struct tcphdr *th;
u16 vlan_id; __be16 vlan_id;
/* Right now, we support IPv4 w/ TCP only */ /* if ring doesn't have an interrupt vector, cannot perform ATR */
if (protocol != htons(ETH_P_IP) || if (!q_vector)
iph->protocol != IPPROTO_TCP)
return; return;
memset(&atr_input, 0, sizeof(struct ixgbe_atr_input)); /* do nothing if sampling is disabled */
if (!ring->atr_sample_rate)
return;
vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >> ring->atr_count++;
IXGBE_TX_FLAGS_VLAN_SHIFT;
/* snag network header to get L4 type and address */
hdr.network = skb_network_header(skb);
/* Currently only IPv4/IPv6 with TCP is supported */
if ((protocol != __constant_htons(ETH_P_IPV6) ||
hdr.ipv6->nexthdr != IPPROTO_TCP) &&
(protocol != __constant_htons(ETH_P_IP) ||
hdr.ipv4->protocol != IPPROTO_TCP))
return;
th = tcp_hdr(skb); th = tcp_hdr(skb);
ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id); /* skip this packet since the socket is closing */
ixgbe_atr_set_src_port_82599(&atr_input, th->dest); if (th->fin)
ixgbe_atr_set_dst_port_82599(&atr_input, th->source); return;
ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto);
ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP); /* sample on all syn packets or once every atr sample count */
/* src and dst are inverted, think how the receiver sees them */ if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr); return;
ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr);
/* reset sample count */
ring->atr_count = 0;
vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
/*
* src and dst are inverted, think how the receiver sees them
*
* The input is broken into two sections: a non-compressed section
* containing vm_pool, vlan_id, and flow_type. The rest of the data
* is XORed together and stored in the compressed dword.
*/
input.formatted.vlan_id = vlan_id;
/*
* since src port and flex bytes occupy the same word, XOR them together
* and write the value to the source port portion of the compressed dword
*/
if (vlan_id)
common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
else
common.port.src ^= th->dest ^ protocol;
common.port.dst ^= th->source;
if (protocol == __constant_htons(ETH_P_IP)) {
input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
} else {
input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
hdr.ipv6->saddr.s6_addr32[1] ^
hdr.ipv6->saddr.s6_addr32[2] ^
hdr.ipv6->saddr.s6_addr32[3] ^
hdr.ipv6->daddr.s6_addr32[0] ^
hdr.ipv6->daddr.s6_addr32[1] ^
hdr.ipv6->daddr.s6_addr32[2] ^
hdr.ipv6->daddr.s6_addr32[3];
}
/* This assumes the Rx queue and Tx queue are bound to the same CPU */ /* This assumes the Rx queue and Tx queue are bound to the same CPU */
ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue); ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
input, common, ring->queue_index);
} }
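A sketch of what the two compressed dwords end up holding for an untagged IPv4/TCP packet (th and hdr as read above, values illustrative): everything the hardware hashes bitwise is XOR-folded up front, so only two dwords reach the filter-programming path instead of the full 44-byte key.

union ixgbe_atr_hash_dword input = { .dword = 0 };
union ixgbe_atr_hash_dword common = { .dword = 0 };

input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
input.formatted.vlan_id = 0;				/* untagged */

/* src port shares its word with the flex bytes, so XOR them */
common.port.src = th->dest ^ protocol;
common.port.dst = th->source;
common.ip = hdr.ipv4->saddr ^ hdr.ipv4->daddr;		/* IP dwords folded */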
static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size) static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
@ -6676,16 +6763,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len); count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
if (count) { if (count) {
/* add the ATR filter if ATR is on */ /* add the ATR filter if ATR is on */
if (tx_ring->atr_sample_rate) { if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
++tx_ring->atr_count; ixgbe_atr(tx_ring, skb, tx_flags, protocol);
if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
test_bit(__IXGBE_TX_FDIR_INIT_DONE,
&tx_ring->state)) {
ixgbe_atr(adapter, skb, tx_ring->queue_index,
tx_flags, protocol);
tx_ring->atr_count = 0;
}
}
txq = netdev_get_tx_queue(netdev, tx_ring->queue_index); txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
txq->tx_bytes += skb->len; txq->tx_bytes += skb->len;
txq->tx_packets++; txq->tx_packets++;


@ -1947,10 +1947,9 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRM_VLANID 0x00000001 #define IXGBE_FDIRM_VLANID 0x00000001
#define IXGBE_FDIRM_VLANP 0x00000002 #define IXGBE_FDIRM_VLANP 0x00000002
#define IXGBE_FDIRM_POOL 0x00000004 #define IXGBE_FDIRM_POOL 0x00000004
#define IXGBE_FDIRM_L3P 0x00000008 #define IXGBE_FDIRM_L4P 0x00000008
#define IXGBE_FDIRM_L4P 0x00000010 #define IXGBE_FDIRM_FLEX 0x00000010
#define IXGBE_FDIRM_FLEX 0x00000020 #define IXGBE_FDIRM_DIPv6 0x00000020
#define IXGBE_FDIRM_DIPv6 0x00000040
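With the renumbered bits, the magic constants in ixgbe_fdir_add_perfect_filter_82599() above line up with names; a sketch of the equivalence:

/* "mask VM pool and destination IPv6 - bits 5 and 2" from above */
fdirm |= IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;	/* 0x04 | 0x20 == 0x24 */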
#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF #define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
#define IXGBE_FDIRFREE_FREE_SHIFT 0 #define IXGBE_FDIRFREE_FREE_SHIFT 0
@ -1990,6 +1989,7 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRCMD_LAST 0x00000800 #define IXGBE_FDIRCMD_LAST 0x00000800
#define IXGBE_FDIRCMD_COLLISION 0x00001000 #define IXGBE_FDIRCMD_COLLISION 0x00001000
#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 #define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5
#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 #define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 #define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
#define IXGBE_FDIR_INIT_DONE_POLL 10 #define IXGBE_FDIR_INIT_DONE_POLL 10
@ -2147,51 +2147,80 @@ typedef u32 ixgbe_physical_layer;
#define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT)) #define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT))
/* Software ATR hash keys */ /* Software ATR hash keys */
#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D #define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17 #define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
/* Software ATR input stream offsets and masks */
#define IXGBE_ATR_VLAN_OFFSET 0
#define IXGBE_ATR_SRC_IPV6_OFFSET 2
#define IXGBE_ATR_SRC_IPV4_OFFSET 14
#define IXGBE_ATR_DST_IPV6_OFFSET 18
#define IXGBE_ATR_DST_IPV4_OFFSET 30
#define IXGBE_ATR_SRC_PORT_OFFSET 34
#define IXGBE_ATR_DST_PORT_OFFSET 36
#define IXGBE_ATR_FLEX_BYTE_OFFSET 38
#define IXGBE_ATR_VM_POOL_OFFSET 40
#define IXGBE_ATR_L4TYPE_OFFSET 41
/* Software ATR input stream values and masks */
#define IXGBE_ATR_HASH_MASK 0x7fff
#define IXGBE_ATR_L4TYPE_MASK 0x3 #define IXGBE_ATR_L4TYPE_MASK 0x3
#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
#define IXGBE_ATR_L4TYPE_UDP 0x1 #define IXGBE_ATR_L4TYPE_UDP 0x1
#define IXGBE_ATR_L4TYPE_TCP 0x2 #define IXGBE_ATR_L4TYPE_TCP 0x2
#define IXGBE_ATR_L4TYPE_SCTP 0x3 #define IXGBE_ATR_L4TYPE_SCTP 0x3
#define IXGBE_ATR_HASH_MASK 0x7fff #define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
enum ixgbe_atr_flow_type {
IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0,
IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1,
IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2,
IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4,
IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5,
IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6,
IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
};
/* Flow Director ATR input struct. */ /* Flow Director ATR input struct. */
struct ixgbe_atr_input { union ixgbe_atr_input {
/* Byte layout in order, all values with MSB first: /*
* Byte layout in order, all values with MSB first:
* *
* vm_pool - 1 byte
* flow_type - 1 byte
* vlan_id - 2 bytes * vlan_id - 2 bytes
* src_ip - 16 bytes * src_ip - 16 bytes
* dst_ip - 16 bytes * dst_ip - 16 bytes
* src_port - 2 bytes * src_port - 2 bytes
* dst_port - 2 bytes * dst_port - 2 bytes
* flex_bytes - 2 bytes * flex_bytes - 2 bytes
* vm_pool - 1 byte * rsvd0 - 2 bytes - space reserved must be 0.
* l4type - 1 byte
*/ */
u8 byte_stream[42]; struct {
u8 vm_pool;
u8 flow_type;
__be16 vlan_id;
__be32 dst_ip[4];
__be32 src_ip[4];
__be16 src_port;
__be16 dst_port;
__be16 flex_bytes;
__be16 rsvd0;
} formatted;
__be32 dword_stream[11];
};
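The overlay is what lets the hash code walk the same 44-byte key either field by field or as eleven big-endian dwords; a short sketch (the XOR fold stands in for the real hash):

union ixgbe_atr_input key;
u32 i, fold = 0;

memset(&key, 0, sizeof(key));
key.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
key.formatted.src_port = htons(1024);

/* the formatted fields are now visible as 11 big-endian dwords */
for (i = 0; i < ARRAY_SIZE(key.dword_stream); i++)
	fold ^= be32_to_cpu(key.dword_stream[i]);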
/* Flow Director compressed ATR hash input struct */
union ixgbe_atr_hash_dword {
struct {
u8 vm_pool;
u8 flow_type;
__be16 vlan_id;
} formatted;
__be32 ip;
struct {
__be16 src;
__be16 dst;
} port;
__be16 flex_bytes;
__be32 dword;
}; };
struct ixgbe_atr_input_masks { struct ixgbe_atr_input_masks {
u32 src_ip_mask; __be16 rsvd0;
u32 dst_ip_mask; __be16 vlan_id_mask;
u16 src_port_mask; __be32 dst_ip_mask[4];
u16 dst_port_mask; __be32 src_ip_mask[4];
u16 vlan_id_mask; __be16 src_port_mask;
u16 data_mask; __be16 dst_port_mask;
__be16 flex_mask;
}; };
enum ixgbe_eeprom_type { enum ixgbe_eeprom_type {


@ -972,7 +972,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
int i; int i;
int err; int err;
dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num); dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
prof->tx_ring_num, prof->rx_ring_num);
if (dev == NULL) { if (dev == NULL) {
mlx4_err(mdev, "Net device allocation failed\n"); mlx4_err(mdev, "Net device allocation failed\n");
return -ENOMEM; return -ENOMEM;


@ -1536,6 +1536,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722), PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722),
PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2), PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2),
PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a), PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether CF-TD LAN Card", 0x5261440f, 0x8797663b),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd), PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d), PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d),
PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d), PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d),


@ -32,6 +32,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/jiffies.h> #include <linux/jiffies.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <asm/unaligned.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/string.h> #include <asm/string.h>
@ -542,7 +543,7 @@ ppp_async_encode(struct asyncppp *ap)
data = ap->tpkt->data; data = ap->tpkt->data;
count = ap->tpkt->len; count = ap->tpkt->len;
fcs = ap->tfcs; fcs = ap->tfcs;
proto = (data[0] << 8) + data[1]; proto = get_unaligned_be16(data);
/* /*
* LCP packets with code values between 1 (configure-request)
@ -963,7 +964,7 @@ static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
code = data[0]; code = data[0];
if (code != CONFACK && code != CONFREQ) if (code != CONFACK && code != CONFREQ)
return; return;
dlen = (data[2] << 8) + data[3]; dlen = get_unaligned_be16(data + 2);
if (len < dlen) if (len < dlen)
return; /* packet got truncated or length is bogus */ return; /* packet got truncated or length is bogus */
@ -997,15 +998,14 @@ static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) { while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
switch (data[0]) { switch (data[0]) {
case LCP_MRU: case LCP_MRU:
val = (data[2] << 8) + data[3]; val = get_unaligned_be16(data + 2);
if (inbound) if (inbound)
ap->mru = val; ap->mru = val;
else else
ap->chan.mtu = val; ap->chan.mtu = val;
break; break;
case LCP_ASYNCMAP: case LCP_ASYNCMAP:
val = (data[2] << 24) + (data[3] << 16) val = get_unaligned_be32(data + 2);
+ (data[4] << 8) + data[5];
if (inbound) if (inbound)
ap->raccm = val; ap->raccm = val;
else else


@ -41,6 +41,7 @@
#include <linux/ppp-comp.h> #include <linux/ppp-comp.h>
#include <linux/zlib.h> #include <linux/zlib.h>
#include <asm/unaligned.h>
/* /*
* State for a Deflate (de)compressor. * State for a Deflate (de)compressor.
@ -232,11 +233,9 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
*/ */
wptr[0] = PPP_ADDRESS(rptr); wptr[0] = PPP_ADDRESS(rptr);
wptr[1] = PPP_CONTROL(rptr); wptr[1] = PPP_CONTROL(rptr);
wptr[2] = PPP_COMP >> 8; put_unaligned_be16(PPP_COMP, wptr + 2);
wptr[3] = PPP_COMP;
wptr += PPP_HDRLEN; wptr += PPP_HDRLEN;
wptr[0] = state->seqno >> 8; put_unaligned_be16(state->seqno, wptr);
wptr[1] = state->seqno;
wptr += DEFLATE_OVHD; wptr += DEFLATE_OVHD;
olen = PPP_HDRLEN + DEFLATE_OVHD; olen = PPP_HDRLEN + DEFLATE_OVHD;
state->strm.next_out = wptr; state->strm.next_out = wptr;
@ -451,7 +450,7 @@ static int z_decompress(void *arg, unsigned char *ibuf, int isize,
} }
/* Check the sequence number. */ /* Check the sequence number. */
seq = (ibuf[PPP_HDRLEN] << 8) + ibuf[PPP_HDRLEN+1]; seq = get_unaligned_be16(ibuf + PPP_HDRLEN);
if (seq != (state->seqno & 0xffff)) { if (seq != (state->seqno & 0xffff)) {
if (state->debug) if (state->debug)
printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n", printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n",


@ -46,6 +46,7 @@
#include <linux/device.h> #include <linux/device.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <asm/unaligned.h>
#include <net/slhc_vj.h> #include <net/slhc_vj.h>
#include <asm/atomic.h> #include <asm/atomic.h>
@ -210,7 +211,7 @@ struct ppp_net {
}; };
/* Get the PPP protocol number from a skb */ /* Get the PPP protocol number from a skb */
#define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1]) #define PPP_PROTO(skb) get_unaligned_be16((skb)->data)
/* We limit the length of ppp->file.rq to this (arbitrary) value */ /* We limit the length of ppp->file.rq to this (arbitrary) value */
#define PPP_MAX_RQLEN 32 #define PPP_MAX_RQLEN 32
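Every conversion in this series follows the same pattern; a quick sketch of the helpers' semantics (buffer contents illustrative). On architectures without cheap unaligned loads these compile down to byte accesses, which is exactly what the open-coded shifts did.

u8 buf[6] = { 0x12, 0x34, 0x00, 0x01, 0x02, 0x03 };

u16 proto = get_unaligned_be16(buf);		/* 0x1234, any alignment */
u32 accm  = get_unaligned_be32(buf + 2);	/* 0x00010203 */

put_unaligned_be16(PPP_MP, buf);		/* buf[0] = PPP_MP >> 8,
						 * buf[1] = PPP_MP & 0xff */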
@ -964,8 +965,7 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
pp = skb_push(skb, 2); pp = skb_push(skb, 2);
proto = npindex_to_proto[npi]; proto = npindex_to_proto[npi];
pp[0] = proto >> 8; put_unaligned_be16(proto, pp);
pp[1] = proto;
netif_stop_queue(dev); netif_stop_queue(dev);
skb_queue_tail(&ppp->file.xq, skb); skb_queue_tail(&ppp->file.xq, skb);
@ -1473,8 +1473,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
q = skb_put(frag, flen + hdrlen); q = skb_put(frag, flen + hdrlen);
/* make the MP header */ /* make the MP header */
q[0] = PPP_MP >> 8; put_unaligned_be16(PPP_MP, q);
q[1] = PPP_MP;
if (ppp->flags & SC_MP_XSHORTSEQ) { if (ppp->flags & SC_MP_XSHORTSEQ) {
q[2] = bits + ((ppp->nxseq >> 8) & 0xf); q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
q[3] = ppp->nxseq; q[3] = ppp->nxseq;


@ -55,6 +55,7 @@
#include <linux/ppp_defs.h> #include <linux/ppp_defs.h>
#include <linux/ppp-comp.h> #include <linux/ppp-comp.h>
#include <linux/scatterlist.h> #include <linux/scatterlist.h>
#include <asm/unaligned.h>
#include "ppp_mppe.h" #include "ppp_mppe.h"
@ -395,16 +396,14 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
*/ */
obuf[0] = PPP_ADDRESS(ibuf); obuf[0] = PPP_ADDRESS(ibuf);
obuf[1] = PPP_CONTROL(ibuf); obuf[1] = PPP_CONTROL(ibuf);
obuf[2] = PPP_COMP >> 8; /* isize + MPPE_OVHD + 1 */ put_unaligned_be16(PPP_COMP, obuf + 2);
obuf[3] = PPP_COMP; /* isize + MPPE_OVHD + 2 */
obuf += PPP_HDRLEN; obuf += PPP_HDRLEN;
state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
if (state->debug >= 7) if (state->debug >= 7)
printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit, printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit,
state->ccount); state->ccount);
obuf[0] = state->ccount >> 8; put_unaligned_be16(state->ccount, obuf);
obuf[1] = state->ccount & 0xff;
if (!state->stateful || /* stateless mode */ if (!state->stateful || /* stateless mode */
((state->ccount & 0xff) == 0xff) || /* "flag" packet */ ((state->ccount & 0xff) == 0xff) || /* "flag" packet */


@ -45,6 +45,7 @@
#include <linux/completion.h> #include <linux/completion.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <asm/unaligned.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#define PPP_VERSION "2.4.2" #define PPP_VERSION "2.4.2"
@ -563,7 +564,7 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
int islcp; int islcp;
data = skb->data; data = skb->data;
proto = (data[0] << 8) + data[1]; proto = get_unaligned_be16(data);
/* LCP packets with codes between 1 (configure-request) /* LCP packets with codes between 1 (configure-request)
* and 7 (code-reject) must be sent as though no options * and 7 (code-reject) must be sent as though no options


@ -34,8 +34,8 @@
#define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0 #define _QLCNIC_LINUX_MINOR 0
#define _QLCNIC_LINUX_SUBVERSION 14 #define _QLCNIC_LINUX_SUBVERSION 15
#define QLCNIC_LINUX_VERSIONID "5.0.14" #define QLCNIC_LINUX_VERSIONID "5.0.15"
#define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@ -289,6 +289,26 @@ struct uni_data_desc{
u32 reserved[5]; u32 reserved[5];
}; };
/* Flash Defines and Structures */
#define QLCNIC_FLT_LOCATION 0x3F1000
#define QLCNIC_FW_IMAGE_REGION 0x74
struct qlcnic_flt_header {
u16 version;
u16 len;
u16 checksum;
u16 reserved;
};
struct qlcnic_flt_entry {
u8 region;
u8 reserved0;
u8 attrib;
u8 reserved1;
u32 size;
u32 start_addr;
u32 end_addr;
};
/* Magic number to let user know flash is programmed */ /* Magic number to let user know flash is programmed */
#define QLCNIC_BDINFO_MAGIC 0x12345678 #define QLCNIC_BDINFO_MAGIC 0x12345678


@ -672,7 +672,7 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
if (data[1]) if (data[1])
eth_test->flags |= ETH_TEST_FL_FAILED; eth_test->flags |= ETH_TEST_FL_FAILED;
if (eth_test->flags == ETH_TEST_FL_OFFLINE) { if (eth_test->flags & ETH_TEST_FL_OFFLINE) {
data[2] = qlcnic_irq_test(dev); data[2] = qlcnic_irq_test(dev);
if (data[2]) if (data[2])
eth_test->flags |= ETH_TEST_FL_FAILED; eth_test->flags |= ETH_TEST_FL_FAILED;


@ -627,12 +627,73 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
return 0; return 0;
} }
static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region,
struct qlcnic_flt_entry *region_entry)
{
struct qlcnic_flt_header flt_hdr;
struct qlcnic_flt_entry *flt_entry;
int i = 0, ret;
u32 entry_size;
memset(region_entry, 0, sizeof(struct qlcnic_flt_entry));
ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION,
(u8 *)&flt_hdr,
sizeof(struct qlcnic_flt_header));
if (ret) {
dev_warn(&adapter->pdev->dev,
"error reading flash layout header\n");
return -EIO;
}
entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header);
flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size);
if (flt_entry == NULL) {
dev_warn(&adapter->pdev->dev, "error allocating memory\n");
return -EIO;
}
ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION +
sizeof(struct qlcnic_flt_header),
(u8 *)flt_entry, entry_size);
if (ret) {
dev_warn(&adapter->pdev->dev,
"error reading flash layout entries\n");
goto err_out;
}
while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) {
if (flt_entry[i].region == region)
break;
i++;
}
if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) {
dev_warn(&adapter->pdev->dev,
"region=%x not found in %d regions\n", region, i);
ret = -EIO;
goto err_out;
}
memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry));
err_out:
vfree(flt_entry);
return ret;
}
int int
qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter) qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
{ {
struct qlcnic_flt_entry fw_entry;
u32 ver = -1, min_ver; u32 ver = -1, min_ver;
int ret;
qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, (int *)&ver); ret = qlcnic_get_flt_entry(adapter, QLCNIC_FW_IMAGE_REGION, &fw_entry);
if (!ret)
/* 0-4: signature, 4-8: fw version */
qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4,
(int *)&ver);
else
qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET,
(int *)&ver);
ver = QLCNIC_DECODE_VERSION(ver); ver = QLCNIC_DECODE_VERSION(ver);
min_ver = QLCNIC_MIN_FW_VERSION; min_ver = QLCNIC_MIN_FW_VERSION;


@ -31,15 +31,15 @@ static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
static struct workqueue_struct *qlcnic_wq; static struct workqueue_struct *qlcnic_wq;
static int qlcnic_mac_learn; static int qlcnic_mac_learn;
module_param(qlcnic_mac_learn, int, 0644); module_param(qlcnic_mac_learn, int, 0444);
MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)"); MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
static int use_msi = 1; static int use_msi = 1;
module_param(use_msi, int, 0644); module_param(use_msi, int, 0444);
MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)"); MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
static int use_msi_x = 1; static int use_msi_x = 1;
module_param(use_msi_x, int, 0644); module_param(use_msi_x, int, 0444);
MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)"); MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
static int auto_fw_reset = AUTO_FW_RESET_ENABLED; static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
@ -47,11 +47,11 @@ module_param(auto_fw_reset, int, 0644);
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)"); MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
static int load_fw_file; static int load_fw_file;
module_param(load_fw_file, int, 0644); module_param(load_fw_file, int, 0444);
MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)"); MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
static int qlcnic_config_npars; static int qlcnic_config_npars;
module_param(qlcnic_config_npars, int, 0644); module_param(qlcnic_config_npars, int, 0444);
MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)"); MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
static int __devinit qlcnic_probe(struct pci_dev *pdev, static int __devinit qlcnic_probe(struct pci_dev *pdev,


@ -1632,36 +1632,134 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
{ {
__le32 *phytable = (__le32 *)fw->data; __le32 *phytable = (__le32 *)fw->data;
struct net_device *dev = tp->dev; struct net_device *dev = tp->dev;
size_t i; size_t index, fw_size = fw->size / sizeof(*phytable);
u32 predata, count;
if (fw->size % sizeof(*phytable)) { if (fw->size % sizeof(*phytable)) {
netif_err(tp, probe, dev, "odd sized firmware %zd\n", fw->size); netif_err(tp, probe, dev, "odd sized firmware %zd\n", fw->size);
return; return;
} }
for (i = 0; i < fw->size / sizeof(*phytable); i++) { for (index = 0; index < fw_size; index++) {
u32 action = le32_to_cpu(phytable[i]); u32 action = le32_to_cpu(phytable[index]);
u32 regno = (action & 0x0fff0000) >> 16;
if (!action) switch(action & 0xf0000000) {
case PHY_READ:
case PHY_DATA_OR:
case PHY_DATA_AND:
case PHY_READ_EFUSE:
case PHY_CLEAR_READCOUNT:
case PHY_WRITE:
case PHY_WRITE_PREVIOUS:
case PHY_DELAY_MS:
break; break;
if ((action & 0xf0000000) != PHY_WRITE) { case PHY_BJMPN:
netif_err(tp, probe, dev, if (regno > index) {
"unknown action 0x%08x\n", action); netif_err(tp, probe, tp->dev,
"Out of range of firmware\n");
return;
}
break;
case PHY_READCOUNT_EQ_SKIP:
if (index + 2 >= fw_size) {
netif_err(tp, probe, tp->dev,
"Out of range of firmware\n");
return;
}
break;
case PHY_COMP_EQ_SKIPN:
case PHY_COMP_NEQ_SKIPN:
case PHY_SKIPN:
if (index + 1 + regno >= fw_size) {
netif_err(tp, probe, tp->dev,
"Out of range of firmware\n");
return;
}
break;
case PHY_READ_MAC_BYTE:
case PHY_WRITE_MAC_BYTE:
case PHY_WRITE_ERI_WORD:
default:
netif_err(tp, probe, tp->dev,
"Invalid action 0x%08x\n", action);
return; return;
} }
} }
while (i-- != 0) { predata = 0;
u32 action = le32_to_cpu(*phytable); count = 0;
for (index = 0; index < fw_size; ) {
u32 action = le32_to_cpu(phytable[index]);
u32 data = action & 0x0000ffff; u32 data = action & 0x0000ffff;
u32 reg = (action & 0x0fff0000) >> 16; u32 regno = (action & 0x0fff0000) >> 16;
if (!action)
break;
switch(action & 0xf0000000) { switch(action & 0xf0000000) {
case PHY_WRITE: case PHY_READ:
rtl_writephy(tp, reg, data); predata = rtl_readphy(tp, regno);
phytable++; count++;
index++;
break; break;
case PHY_DATA_OR:
predata |= data;
index++;
break;
case PHY_DATA_AND:
predata &= data;
index++;
break;
case PHY_BJMPN:
index -= regno;
break;
case PHY_READ_EFUSE:
predata = rtl8168d_efuse_read(tp->mmio_addr, regno);
index++;
break;
case PHY_CLEAR_READCOUNT:
count = 0;
index++;
break;
case PHY_WRITE:
rtl_writephy(tp, regno, data);
index++;
break;
case PHY_READCOUNT_EQ_SKIP:
if (count == data)
index += 2;
else
index += 1;
break;
case PHY_COMP_EQ_SKIPN:
if (predata == data)
index += regno;
index++;
break;
case PHY_COMP_NEQ_SKIPN:
if (predata != data)
index += regno;
index++;
break;
case PHY_WRITE_PREVIOUS:
rtl_writephy(tp, regno, predata);
index++;
break;
case PHY_SKIPN:
index += regno + 1;
break;
case PHY_DELAY_MS:
mdelay(data);
index++;
break;
case PHY_READ_MAC_BYTE:
case PHY_WRITE_MAC_BYTE:
case PHY_WRITE_ERI_WORD:
default: default:
BUG(); BUG();
} }
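Both the validation pass and the interpreter loop above decode the same 32-bit action word; its layout as the parser reads it (field names ours):

u32 action = le32_to_cpu(phytable[index]);

u32 opcode = action & 0xf0000000;		/* PHY_READ, PHY_WRITE, PHY_BJMPN, ... */
u32 regno  = (action & 0x0fff0000) >> 16;	/* register number or jump/skip distance */
u32 data   = action & 0x0000ffff;		/* immediate value, delay in ms, or count */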
@ -3069,15 +3167,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl8168_driver_start(tp); rtl8168_driver_start(tp);
} }
rtl8169_init_phy(dev, tp);
/*
* Pretend we are using VLANs; This bypasses a nasty bug where
* Interrupts stop flowing on high load on 8110SCd controllers.
*/
if (tp->mac_version == RTL_GIGA_MAC_VER_05)
RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
if (pci_dev_run_wake(pdev)) if (pci_dev_run_wake(pdev))
@ -3127,6 +3216,7 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
static int rtl8169_open(struct net_device *dev) static int rtl8169_open(struct net_device *dev)
{ {
struct rtl8169_private *tp = netdev_priv(dev); struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev; struct pci_dev *pdev = tp->pci_dev;
int retval = -ENOMEM; int retval = -ENOMEM;
@ -3162,6 +3252,15 @@ static int rtl8169_open(struct net_device *dev)
napi_enable(&tp->napi); napi_enable(&tp->napi);
rtl8169_init_phy(dev, tp);
/*
* Pretend we are using VLANs; This bypasses a nasty bug where
* Interrupts stop flowing on high load on 8110SCd controllers.
*/
if (tp->mac_version == RTL_GIGA_MAC_VER_05)
RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
rtl_pll_power_up(tp); rtl_pll_power_up(tp);
rtl_hw_start(dev); rtl_hw_start(dev);
@ -3171,7 +3270,7 @@ static int rtl8169_open(struct net_device *dev)
tp->saved_wolopts = 0; tp->saved_wolopts = 0;
pm_runtime_put_noidle(&pdev->dev); pm_runtime_put_noidle(&pdev->dev);
rtl8169_check_link_status(dev, tp, tp->mmio_addr); rtl8169_check_link_status(dev, tp, ioaddr);
out: out:
return retval; return retval;


@ -46,10 +46,6 @@
#include <asm/irq.h> #include <asm/irq.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define SKY2_VLAN_TAG_USED 1
#endif
#include "sky2.h" #include "sky2.h"
#define DRV_NAME "sky2" #define DRV_NAME "sky2"
@ -1326,39 +1322,34 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return err; return err;
} }
#ifdef SKY2_VLAN_TAG_USED #define NETIF_F_ALL_VLAN (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)
static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff)
{
if (onoff) {
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
RX_VLAN_STRIP_ON);
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
TX_VLAN_TAG_ON);
} else {
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
RX_VLAN_STRIP_OFF);
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
TX_VLAN_TAG_OFF);
}
}
static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) static void sky2_vlan_mode(struct net_device *dev)
{ {
struct sky2_port *sky2 = netdev_priv(dev); struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw; struct sky2_hw *hw = sky2->hw;
u16 port = sky2->port; u16 port = sky2->port;
netif_tx_lock_bh(dev); if (dev->features & NETIF_F_HW_VLAN_RX)
napi_disable(&hw->napi); sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
RX_VLAN_STRIP_ON);
else
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
RX_VLAN_STRIP_OFF);
sky2->vlgrp = grp; dev->vlan_features = dev->features &~ NETIF_F_ALL_VLAN;
sky2_set_vlan_mode(hw, port, grp != NULL); if (dev->features & NETIF_F_HW_VLAN_TX)
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
TX_VLAN_TAG_ON);
else {
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
TX_VLAN_TAG_OFF);
sky2_read32(hw, B0_Y2_SP_LISR); /* Can't do transmit offload of vlan without hw vlan */
napi_enable(&hw->napi); dev->vlan_features &= ~(NETIF_F_TSO | NETIF_F_SG
netif_tx_unlock_bh(dev); | NETIF_F_ALL_CSUM);
}
} }
#endif
/* Amount of required worst case padding in rx buffer */ /* Amount of required worst case padding in rx buffer */
static inline unsigned sky2_rx_pad(const struct sky2_hw *hw) static inline unsigned sky2_rx_pad(const struct sky2_hw *hw)
@ -1635,9 +1626,7 @@ static void sky2_hw_up(struct sky2_port *sky2)
sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
sky2->tx_ring_size - 1); sky2->tx_ring_size - 1);
#ifdef SKY2_VLAN_TAG_USED sky2_vlan_mode(sky2->netdev);
sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
#endif
sky2_rx_start(sky2); sky2_rx_start(sky2);
} }
@ -1780,7 +1769,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
} }
ctrl = 0; ctrl = 0;
#ifdef SKY2_VLAN_TAG_USED
/* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */ /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
if (vlan_tx_tag_present(skb)) { if (vlan_tx_tag_present(skb)) {
if (!le) { if (!le) {
@ -1792,7 +1781,6 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
le->length = cpu_to_be16(vlan_tx_tag_get(skb)); le->length = cpu_to_be16(vlan_tx_tag_get(skb));
ctrl |= INS_VLAN; ctrl |= INS_VLAN;
} }
#endif
/* Handle TCP checksum offload */ /* Handle TCP checksum offload */
if (skb->ip_summed == CHECKSUM_PARTIAL) { if (skb->ip_summed == CHECKSUM_PARTIAL) {
@ -2432,11 +2420,8 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
struct sk_buff *skb = NULL; struct sk_buff *skb = NULL;
u16 count = (status & GMR_FS_LEN) >> 16; u16 count = (status & GMR_FS_LEN) >> 16;
#ifdef SKY2_VLAN_TAG_USED if (status & GMR_FS_VLAN)
/* Account for vlan tag */ count -= VLAN_HLEN; /* Account for vlan tag */
if (sky2->vlgrp && (status & GMR_FS_VLAN))
count -= VLAN_HLEN;
#endif
netif_printk(sky2, rx_status, KERN_DEBUG, dev, netif_printk(sky2, rx_status, KERN_DEBUG, dev,
"rx slot %u status 0x%x len %d\n", "rx slot %u status 0x%x len %d\n",
@ -2504,17 +2489,9 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
static inline void sky2_skb_rx(const struct sky2_port *sky2, static inline void sky2_skb_rx(const struct sky2_port *sky2,
u32 status, struct sk_buff *skb) u32 status, struct sk_buff *skb)
{ {
#ifdef SKY2_VLAN_TAG_USED if (status & GMR_FS_VLAN)
u16 vlan_tag = be16_to_cpu(sky2->rx_tag); __vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag));
if (sky2->vlgrp && (status & GMR_FS_VLAN)) {
if (skb->ip_summed == CHECKSUM_NONE)
vlan_hwaccel_receive_skb(skb, sky2->vlgrp, vlan_tag);
else
vlan_gro_receive(&sky2->hw->napi, sky2->vlgrp,
vlan_tag, skb);
return;
}
#endif
if (skb->ip_summed == CHECKSUM_NONE) if (skb->ip_summed == CHECKSUM_NONE)
netif_receive_skb(skb); netif_receive_skb(skb);
else else
@ -2631,7 +2608,6 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
goto exit_loop; goto exit_loop;
break; break;
#ifdef SKY2_VLAN_TAG_USED
case OP_RXVLAN: case OP_RXVLAN:
sky2->rx_tag = length; sky2->rx_tag = length;
break; break;
@ -2639,7 +2615,6 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
case OP_RXCHKSVLAN: case OP_RXCHKSVLAN:
sky2->rx_tag = length; sky2->rx_tag = length;
/* fall through */ /* fall through */
#endif
case OP_RXCHKS: case OP_RXCHKS:
if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM)) if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM))
sky2_rx_checksum(sky2, status); sky2_rx_checksum(sky2, status);
@ -3042,6 +3017,10 @@ static int __devinit sky2_init(struct sky2_hw *hw)
| SKY2_HW_NEW_LE | SKY2_HW_NEW_LE
| SKY2_HW_AUTO_TX_SUM | SKY2_HW_AUTO_TX_SUM
| SKY2_HW_ADV_POWER_CTL; | SKY2_HW_ADV_POWER_CTL;
/* The workaround for status conflicts with VLAN tag detection. */
if (hw->chip_rev == CHIP_REV_YU_FE2_A0)
hw->flags |= SKY2_HW_VLAN_BROKEN;
break; break;
case CHIP_ID_YUKON_SUPR: case CHIP_ID_YUKON_SUPR:
@ -3411,18 +3390,15 @@ static u32 sky2_supported_modes(const struct sky2_hw *hw)
u32 modes = SUPPORTED_10baseT_Half u32 modes = SUPPORTED_10baseT_Half
| SUPPORTED_10baseT_Full | SUPPORTED_10baseT_Full
| SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Half
| SUPPORTED_100baseT_Full | SUPPORTED_100baseT_Full;
| SUPPORTED_Autoneg | SUPPORTED_TP;
if (hw->flags & SKY2_HW_GIGABIT) if (hw->flags & SKY2_HW_GIGABIT)
modes |= SUPPORTED_1000baseT_Half modes |= SUPPORTED_1000baseT_Half
| SUPPORTED_1000baseT_Full; | SUPPORTED_1000baseT_Full;
return modes; return modes;
} else } else
return SUPPORTED_1000baseT_Half return SUPPORTED_1000baseT_Half
| SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Full;
| SUPPORTED_Autoneg
| SUPPORTED_FIBRE;
} }
static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
@ -3436,9 +3412,11 @@ static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
if (sky2_is_copper(hw)) { if (sky2_is_copper(hw)) {
ecmd->port = PORT_TP; ecmd->port = PORT_TP;
ecmd->speed = sky2->speed; ecmd->speed = sky2->speed;
ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP;
} else { } else {
ecmd->speed = SPEED_1000; ecmd->speed = SPEED_1000;
ecmd->port = PORT_FIBRE; ecmd->port = PORT_FIBRE;
ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE;
} }
ecmd->advertising = sky2->advertising; ecmd->advertising = sky2->advertising;
@ -3455,8 +3433,19 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
u32 supported = sky2_supported_modes(hw); u32 supported = sky2_supported_modes(hw);
if (ecmd->autoneg == AUTONEG_ENABLE) { if (ecmd->autoneg == AUTONEG_ENABLE) {
if (ecmd->advertising & ~supported)
return -EINVAL;
if (sky2_is_copper(hw))
sky2->advertising = ecmd->advertising |
ADVERTISED_TP |
ADVERTISED_Autoneg;
else
sky2->advertising = ecmd->advertising |
ADVERTISED_FIBRE |
ADVERTISED_Autoneg;
sky2->flags |= SKY2_FLAG_AUTO_SPEED; sky2->flags |= SKY2_FLAG_AUTO_SPEED;
ecmd->advertising = supported;
sky2->duplex = -1; sky2->duplex = -1;
sky2->speed = -1; sky2->speed = -1;
} else { } else {
@ -3500,8 +3489,6 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
sky2->flags &= ~SKY2_FLAG_AUTO_SPEED; sky2->flags &= ~SKY2_FLAG_AUTO_SPEED;
} }
sky2->advertising = ecmd->advertising;
if (netif_running(dev)) { if (netif_running(dev)) {
sky2_phy_reinit(sky2); sky2_phy_reinit(sky2);
sky2_set_multicast(dev); sky2_set_multicast(dev);
@ -4229,15 +4216,28 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
static int sky2_set_flags(struct net_device *dev, u32 data) static int sky2_set_flags(struct net_device *dev, u32 data)
{ {
struct sky2_port *sky2 = netdev_priv(dev); struct sky2_port *sky2 = netdev_priv(dev);
u32 supported = unsigned long old_feat = dev->features;
(sky2->hw->flags & SKY2_HW_RSS_BROKEN) ? 0 : ETH_FLAG_RXHASH; u32 supported = 0;
int rc; int rc;
if (!(sky2->hw->flags & SKY2_HW_RSS_BROKEN))
supported |= ETH_FLAG_RXHASH;
if (!(sky2->hw->flags & SKY2_HW_VLAN_BROKEN))
supported |= ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN;
printk(KERN_DEBUG "sky2 set_flags: supported %x data %x\n",
supported, data);
rc = ethtool_op_set_flags(dev, data, supported); rc = ethtool_op_set_flags(dev, data, supported);
if (rc) if (rc)
return rc; return rc;
rx_set_rss(dev); if ((old_feat ^ dev->features) & NETIF_F_RXHASH)
rx_set_rss(dev);
if ((old_feat ^ dev->features) & NETIF_F_ALL_VLAN)
sky2_vlan_mode(dev);
return 0; return 0;
} }
@ -4273,6 +4273,7 @@ static const struct ethtool_ops sky2_ethtool_ops = {
.get_sset_count = sky2_get_sset_count, .get_sset_count = sky2_get_sset_count,
.get_ethtool_stats = sky2_get_ethtool_stats, .get_ethtool_stats = sky2_get_ethtool_stats,
.set_flags = sky2_set_flags, .set_flags = sky2_set_flags,
.get_flags = ethtool_op_get_flags,
}; };
#ifdef CONFIG_SKY2_DEBUG #ifdef CONFIG_SKY2_DEBUG
@ -4554,9 +4555,6 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
.ndo_change_mtu = sky2_change_mtu, .ndo_change_mtu = sky2_change_mtu,
.ndo_tx_timeout = sky2_tx_timeout, .ndo_tx_timeout = sky2_tx_timeout,
.ndo_get_stats64 = sky2_get_stats, .ndo_get_stats64 = sky2_get_stats,
#ifdef SKY2_VLAN_TAG_USED
.ndo_vlan_rx_register = sky2_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = sky2_netpoll, .ndo_poll_controller = sky2_netpoll,
#endif #endif
@ -4572,9 +4570,6 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
.ndo_change_mtu = sky2_change_mtu, .ndo_change_mtu = sky2_change_mtu,
.ndo_tx_timeout = sky2_tx_timeout, .ndo_tx_timeout = sky2_tx_timeout,
.ndo_get_stats64 = sky2_get_stats, .ndo_get_stats64 = sky2_get_stats,
#ifdef SKY2_VLAN_TAG_USED
.ndo_vlan_rx_register = sky2_vlan_rx_register,
#endif
}, },
}; };
@ -4625,7 +4620,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
sky2->port = port; sky2->port = port;
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG
| NETIF_F_TSO | NETIF_F_GRO; | NETIF_F_TSO | NETIF_F_GRO;
if (highmem) if (highmem)
dev->features |= NETIF_F_HIGHDMA; dev->features |= NETIF_F_HIGHDMA;
@ -4633,13 +4629,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
if (!(hw->flags & SKY2_HW_RSS_BROKEN)) if (!(hw->flags & SKY2_HW_RSS_BROKEN))
dev->features |= NETIF_F_RXHASH; dev->features |= NETIF_F_RXHASH;
#ifdef SKY2_VLAN_TAG_USED if (!(hw->flags & SKY2_HW_VLAN_BROKEN))
/* The workaround for FE+ status conflicts with VLAN tag detection. */
if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) {
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
}
#endif
/* read the mac address */ /* read the mac address */
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN); memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);


@ -2236,11 +2236,8 @@ struct sky2_port {
u16 rx_pending; u16 rx_pending;
u16 rx_data_size; u16 rx_data_size;
u16 rx_nfrags; u16 rx_nfrags;
#ifdef SKY2_VLAN_TAG_USED
u16 rx_tag; u16 rx_tag;
struct vlan_group *vlgrp;
#endif
struct { struct {
unsigned long last; unsigned long last;
u32 mac_rp; u32 mac_rp;
@ -2284,6 +2281,7 @@ struct sky2_hw {
#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ #define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ #define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
#define SKY2_HW_RSS_BROKEN 0x00000100 #define SKY2_HW_RSS_BROKEN 0x00000100
#define SKY2_HW_VLAN_BROKEN 0x00000200
u8 chip_id; u8 chip_id;
u8 chip_rev; u8 chip_rev;


@ -488,7 +488,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!netif_carrier_ok(dev) || if (unlikely(!netif_carrier_ok(dev) ||
(frags > 1 && !xennet_can_sg(dev)) || (frags > 1 && !xennet_can_sg(dev)) ||
netif_needs_gso(dev, skb))) { netif_needs_gso(skb, netif_skb_features(skb)))) {
spin_unlock_irq(&np->tx_lock); spin_unlock_irq(&np->tx_lock);
goto drop; goto drop;
} }


@ -24,6 +24,7 @@ struct bfin_mii_bus_platform_data {
const unsigned short *mac_peripherals; const unsigned short *mac_peripherals;
int phy_mode; int phy_mode;
unsigned int phy_mask; unsigned int phy_mask;
unsigned short vlan1_mask, vlan2_mask;
}; };
#endif #endif


@ -48,8 +48,10 @@ extern int eth_validate_addr(struct net_device *dev);
extern struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count); extern struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
unsigned int rxqs);
#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1) #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
/** /**
* is_zero_ether_addr - Determine if given Ethernet address is all zeros. * is_zero_ether_addr - Determine if given Ethernet address is all zeros.


@ -3,6 +3,8 @@
* Copyright (c) 2009 Orex Computed Radiography * Copyright (c) 2009 Orex Computed Radiography
* Baruch Siach <baruch@tkos.co.il> * Baruch Siach <baruch@tkos.co.il>
* *
* Copyright (C) 2010 Freescale Semiconductor, Inc.
*
* Header file for the FEC platform data * Header file for the FEC platform data
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
@ -16,6 +18,7 @@
struct fec_platform_data { struct fec_platform_data {
phy_interface_t phy; phy_interface_t phy;
unsigned char mac[ETH_ALEN];
}; };
#endif #endif


@ -103,7 +103,7 @@ struct __fdb_entry {
extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
typedef int (*br_should_route_hook_t)(struct sk_buff *skb); typedef int br_should_route_hook_t(struct sk_buff *skb);
extern br_should_route_hook_t __rcu *br_should_route_hook; extern br_should_route_hook_t __rcu *br_should_route_hook;
#endif #endif


@ -2191,11 +2191,15 @@ static inline void netif_addr_unlock_bh(struct net_device *dev)
extern void ether_setup(struct net_device *dev); extern void ether_setup(struct net_device *dev);
/* Support for loadable net-drivers */ /* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
void (*setup)(struct net_device *), void (*setup)(struct net_device *),
unsigned int queue_count); unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, setup) \ #define alloc_netdev(sizeof_priv, name, setup) \
alloc_netdev_mq(sizeof_priv, name, setup, 1) alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
extern int register_netdev(struct net_device *dev); extern int register_netdev(struct net_device *dev);
extern void unregister_netdev(struct net_device *dev); extern void unregister_netdev(struct net_device *dev);
@ -2303,7 +2307,7 @@ unsigned long netdev_fix_features(unsigned long features, const char *name);
void netif_stacked_transfer_operstate(const struct net_device *rootdev, void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev); struct net_device *dev);
int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev); int netif_skb_features(struct sk_buff *skb);
static inline int net_gso_ok(int features, int gso_type) static inline int net_gso_ok(int features, int gso_type)
{ {
@ -2317,16 +2321,10 @@ static inline int skb_gso_ok(struct sk_buff *skb, int features)
(!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
} }
static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) static inline int netif_needs_gso(struct sk_buff *skb, int features)
{ {
if (skb_is_gso(skb)) { return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
int features = netif_get_vlan_features(skb, dev); unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
return (!skb_gso_ok(skb, features) ||
unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}
return 0;
} }
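Callers now supply the feature set themselves, as the xen-netfront hunk above does; a sketch of the common pattern (software_gso_segment() is a hypothetical fallback, not a kernel symbol):

int features = netif_skb_features(skb);

if (netif_needs_gso(skb, features)) {
	/* the device cannot segment this skb as-is */
	if (software_gso_segment(skb, features))
		goto drop;
}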
static inline void netif_set_gso_max_size(struct net_device *dev, static inline void netif_set_gso_max_size(struct net_device *dev,


@@ -472,7 +472,7 @@ extern void xt_free_table_info(struct xt_table_info *info);
  * necessary for reading the counters.
  */
 struct xt_info_lock {
-    spinlock_t lock;
+    seqlock_t lock;
     unsigned char readers;
 };
 DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
@@ -497,7 +497,7 @@ static inline void xt_info_rdlock_bh(void)
     local_bh_disable();
     lock = &__get_cpu_var(xt_info_locks);
     if (likely(!lock->readers++))
-        spin_lock(&lock->lock);
+        write_seqlock(&lock->lock);
 }
 
 static inline void xt_info_rdunlock_bh(void)
@@ -505,7 +505,7 @@ static inline void xt_info_rdunlock_bh(void)
     struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);
 
     if (likely(!--lock->readers))
-        spin_unlock(&lock->lock);
+        write_sequnlock(&lock->lock);
     local_bh_enable();
 }
@@ -516,12 +516,12 @@ static inline void xt_info_rdunlock_bh(void)
  */
 static inline void xt_info_wrlock(unsigned int cpu)
 {
-    spin_lock(&per_cpu(xt_info_locks, cpu).lock);
+    write_seqlock(&per_cpu(xt_info_locks, cpu).lock);
 }
 
 static inline void xt_info_wrunlock(unsigned int cpu)
 {
-    spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
+    write_sequnlock(&per_cpu(xt_info_locks, cpu).lock);
 }
 
 /*
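
Switching xt_info_lock from spinlock_t to seqlock_t lets counter readers retry instead of block: a writer leaves the sequence odd while updating, and a reader loops until it sees the same even sequence before and after its snapshot. A minimal single-threaded userspace model of that retry idiom, with mock types standing in for the kernel's seqlock (the get_counters() hunks later in this diff use the real read_seqbegin()/read_seqretry() pair):

#include <stdio.h>

/* Mock seqcount-protected counters; the kernel's seqlock_t adds a lock
 * for writers and memory barriers, both omitted in this demo. */
struct mock_lock { unsigned sequence; unsigned long bcnt, pcnt; };

static void write_seqlock(struct mock_lock *l)   { l->sequence++; } /* odd: write in progress */
static void write_sequnlock(struct mock_lock *l) { l->sequence++; } /* even: stable */

static unsigned read_seqbegin(const struct mock_lock *l) { return l->sequence; }
static int read_seqretry(const struct mock_lock *l, unsigned start)
{
    return (start & 1) || l->sequence != start;
}

int main(void)
{
    struct mock_lock lock = { 0, 0, 0 };
    unsigned long bcnt, pcnt;
    unsigned start;

    write_seqlock(&lock);          /* writer updates under the lock */
    lock.bcnt += 1500;
    lock.pcnt += 1;
    write_sequnlock(&lock);

    do {                           /* reader retries until stable */
        start = read_seqbegin(&lock);
        bcnt = lock.bcnt;
        pcnt = lock.pcnt;
    } while (read_seqretry(&lock, start));

    printf("bytes=%lu packets=%lu\n", bcnt, pcnt);
    return 0;
}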

View File

@@ -4,7 +4,7 @@
 #include <linux/skbuff.h>
 
 /* This is the maximum truncated ICV length that we know of. */
-#define MAX_AH_AUTH_LEN 12
+#define MAX_AH_AUTH_LEN 16
 
 struct crypto_ahash;

View File

@@ -25,5 +25,6 @@ extern struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
                 const unsigned char *src_hw,
                 const unsigned char *target_hw);
 extern void arp_xmit(struct sk_buff *skb);
+int arp_invalidate(struct net_device *dev, __be32 ip);
 
 #endif /* _ARP_H */

View File

@@ -107,8 +107,8 @@ struct phonet_protocol {
     int sock_type;
 };
 
-int phonet_proto_register(int protocol, struct phonet_protocol *pp);
-void phonet_proto_unregister(int protocol, struct phonet_protocol *pp);
+int phonet_proto_register(unsigned int protocol, struct phonet_protocol *pp);
+void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp);
 
 int phonet_sysctl_init(void);
 void phonet_sysctl_exit(void);

View File

@@ -207,7 +207,7 @@ static inline int qdisc_qlen(struct Qdisc *q)
     return q->q.qlen;
 }
 
-static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
+static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
 {
     return (struct qdisc_skb_cb *)skb->cb;
 }
@@ -394,7 +394,7 @@ static inline bool qdisc_tx_is_noop(const struct net_device *dev)
     return true;
 }
 
-static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
+static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
 {
     return qdisc_skb_cb(skb)->pkt_len;
 }
@@ -426,10 +426,18 @@ static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
     return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
 }
 
-static inline void __qdisc_update_bstats(struct Qdisc *sch, unsigned int len)
+static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
+                 const struct sk_buff *skb)
 {
-    sch->bstats.bytes += len;
-    sch->bstats.packets++;
+    bstats->bytes += qdisc_pkt_len(skb);
+    bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+}
+
+static inline void qdisc_bstats_update(struct Qdisc *sch,
+                       const struct sk_buff *skb)
+{
+    bstats_update(&sch->bstats, skb);
 }
 
 static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
@@ -437,7 +445,7 @@ static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
 {
     __skb_queue_tail(list, skb);
     sch->qstats.backlog += qdisc_pkt_len(skb);
-    __qdisc_update_bstats(sch, qdisc_pkt_len(skb));
+    qdisc_bstats_update(sch, skb);
 
     return NET_XMIT_SUCCESS;
 }
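
The new bstats_update() centralizes byte/packet accounting and, unlike the old open-coded increments, counts a GSO super-packet as gso_segs packets rather than one. A stand-alone sketch with mock skb fields (assumed shapes, not the kernel structs):

#include <stdio.h>

struct gnet_stats_basic_packed { unsigned long long bytes; unsigned int packets; };
struct mock_skb { unsigned int len; unsigned int gso_segs; }; /* gso_segs == 0: not GSO */

static void bstats_update(struct gnet_stats_basic_packed *bstats,
                          const struct mock_skb *skb)
{
    bstats->bytes   += skb->len;
    bstats->packets += skb->gso_segs ? skb->gso_segs : 1;
}

int main(void)
{
    struct gnet_stats_basic_packed b = { 0, 0 };
    struct mock_skb plain = { 1500, 0 };   /* ordinary packet */
    struct mock_skb gso   = { 64240, 44 }; /* one super-packet, 44 segments */

    bstats_update(&b, &plain);
    bstats_update(&b, &gso);
    printf("bytes=%llu packets=%u\n", b.bytes, b.packets); /* 65740, 45 */
    return 0;
}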

View File

@@ -152,14 +152,18 @@ struct sock_common {
      * fields between dontcopy_begin/dontcopy_end
      * are not copied in sock_copy()
      */
+    /* private: */
     int skc_dontcopy_begin[0];
+    /* public: */
     union {
         struct hlist_node skc_node;
         struct hlist_nulls_node skc_nulls_node;
     };
     int skc_tx_queue_mapping;
     atomic_t skc_refcnt;
+    /* private: */
     int skc_dontcopy_end[0];
+    /* public: */
 };
 
 /**

View File

@@ -740,12 +740,12 @@ static int setsockopt(struct socket *sock,
         if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
             return -ENOPROTOOPT;
         lock_sock(&(cf_sk->sk));
-        cf_sk->conn_req.param.size = ol;
         if (ol > sizeof(cf_sk->conn_req.param.data) ||
             copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
             release_sock(&cf_sk->sk);
             return -EINVAL;
         }
+        cf_sk->conn_req.param.size = ol;
         release_sock(&cf_sk->sk);
         return 0;

View File

@@ -76,6 +76,8 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
     struct chnl_net *priv  = container_of(layr, struct chnl_net, chnl);
     int pktlen;
     int err = 0;
+    const u8 *ip_version;
+    u8 buf;
 
     priv = container_of(layr, struct chnl_net, chnl);
@@ -90,7 +92,21 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
      * send the packet to the net stack.
      */
     skb->dev = priv->netdev;
-    skb->protocol = htons(ETH_P_IP);
+
+    /* check the version of IP */
+    ip_version = skb_header_pointer(skb, 0, 1, &buf);
+    if (!ip_version)
+        return -EINVAL;
+    switch (*ip_version >> 4) {
+    case 4:
+        skb->protocol = htons(ETH_P_IP);
+        break;
+    case 6:
+        skb->protocol = htons(ETH_P_IPV6);
+        break;
+    default:
+        return -EINVAL;
+    }
 
     /* If we change the header in loop mode, the checksum is corrupted. */
     if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
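
The added switch classifies the payload by the version nibble in the first byte of the IP header, which is 4 for IPv4 and 6 for IPv6. A userspace illustration of the same test:

#include <stdio.h>

static const char *classify(const unsigned char *payload)
{
    switch (payload[0] >> 4) {  /* high nibble of the first byte */
    case 4:  return "ETH_P_IP";
    case 6:  return "ETH_P_IPV6";
    default: return "invalid";
    }
}

int main(void)
{
    unsigned char v4[]  = { 0x45 };  /* version 4, IHL 5 */
    unsigned char v6[]  = { 0x60 };  /* version 6 */
    unsigned char bad[] = { 0x20 };

    printf("%s %s %s\n", classify(v4), classify(v6), classify(bad));
    return 0;
}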

View File

@@ -1732,33 +1732,6 @@ void netif_device_attach(struct net_device *dev)
 }
 EXPORT_SYMBOL(netif_device_attach);
 
-static bool can_checksum_protocol(unsigned long features, __be16 protocol)
-{
-    return ((features & NETIF_F_NO_CSUM) ||
-        ((features & NETIF_F_V4_CSUM) &&
-         protocol == htons(ETH_P_IP)) ||
-        ((features & NETIF_F_V6_CSUM) &&
-         protocol == htons(ETH_P_IPV6)) ||
-        ((features & NETIF_F_FCOE_CRC) &&
-         protocol == htons(ETH_P_FCOE)));
-}
-
-static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
-{
-    __be16 protocol = skb->protocol;
-    int features = dev->features;
-
-    if (vlan_tx_tag_present(skb)) {
-        features &= dev->vlan_features;
-    } else if (protocol == htons(ETH_P_8021Q)) {
-        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
-        protocol = veh->h_vlan_encapsulated_proto;
-        features &= dev->vlan_features;
-    }
-
-    return can_checksum_protocol(features, protocol);
-}
-
 /**
  * skb_dev_set -- assign a new device to a buffer
  * @skb: buffer for the new device
@@ -1971,16 +1944,14 @@ static void dev_gso_skb_destructor(struct sk_buff *skb)
 /**
  * dev_gso_segment - Perform emulated hardware segmentation on skb.
  * @skb: buffer to segment
+ * @features: device features as applicable to this skb
  *
  * This function segments the given skb and stores the list of segments
  * in skb->next.
  */
-static int dev_gso_segment(struct sk_buff *skb)
+static int dev_gso_segment(struct sk_buff *skb, int features)
 {
-    struct net_device *dev = skb->dev;
     struct sk_buff *segs;
-    int features = dev->features & ~(illegal_highdma(dev, skb) ?
-                     NETIF_F_SG : 0);
 
     segs = skb_gso_segment(skb, features);
@@ -2017,22 +1988,52 @@ static inline void skb_orphan_try(struct sk_buff *skb)
     }
 }
 
-int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev)
+static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+{
+    return ((features & NETIF_F_GEN_CSUM) ||
+        ((features & NETIF_F_V4_CSUM) &&
+         protocol == htons(ETH_P_IP)) ||
+        ((features & NETIF_F_V6_CSUM) &&
+         protocol == htons(ETH_P_IPV6)) ||
+        ((features & NETIF_F_FCOE_CRC) &&
+         protocol == htons(ETH_P_FCOE)));
+}
+
+static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features)
+{
+    if (!can_checksum_protocol(protocol, features)) {
+        features &= ~NETIF_F_ALL_CSUM;
+        features &= ~NETIF_F_SG;
+    } else if (illegal_highdma(skb->dev, skb)) {
+        features &= ~NETIF_F_SG;
+    }
+
+    return features;
+}
+
+int netif_skb_features(struct sk_buff *skb)
 {
     __be16 protocol = skb->protocol;
+    int features = skb->dev->features;
 
     if (protocol == htons(ETH_P_8021Q)) {
         struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
         protocol = veh->h_vlan_encapsulated_proto;
-    } else if (!skb->vlan_tci)
-        return dev->features;
+    } else if (!vlan_tx_tag_present(skb)) {
+        return harmonize_features(skb, protocol, features);
+    }
 
-    if (protocol != htons(ETH_P_8021Q))
-        return dev->features & dev->vlan_features;
-    else
-        return 0;
+    features &= skb->dev->vlan_features;
+
+    if (protocol != htons(ETH_P_8021Q)) {
+        return harmonize_features(skb, protocol, features);
+    } else {
+        features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
+                NETIF_F_GEN_CSUM;
+        return harmonize_features(skb, protocol, features);
+    }
 }
-EXPORT_SYMBOL(netif_get_vlan_features);
+EXPORT_SYMBOL(netif_skb_features);
 
 /*
  * Returns true if either:
@@ -2042,22 +2043,13 @@ EXPORT_SYMBOL(netif_get_vlan_features);
  *    support DMA from it.
  */
 static inline int skb_needs_linearize(struct sk_buff *skb,
-                      struct net_device *dev)
+                      int features)
 {
-    if (skb_is_nonlinear(skb)) {
-        int features = dev->features;
-
-        if (vlan_tx_tag_present(skb))
-            features &= dev->vlan_features;
-
-        return (skb_has_frag_list(skb) &&
-            !(features & NETIF_F_FRAGLIST)) ||
-            (skb_shinfo(skb)->nr_frags &&
-            (!(features & NETIF_F_SG) ||
-            illegal_highdma(dev, skb)));
-    }
-
-    return 0;
+    return skb_is_nonlinear(skb) &&
+            ((skb_has_frag_list(skb) &&
+                !(features & NETIF_F_FRAGLIST)) ||
+            (skb_shinfo(skb)->nr_frags &&
+                !(features & NETIF_F_SG)));
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -2067,6 +2059,8 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
     int rc = NETDEV_TX_OK;
 
     if (likely(!skb->next)) {
+        int features;
+
         /*
          * If device doesnt need skb->dst, release it right now while
          * its hot in this cpu cache
@@ -2079,8 +2073,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 
         skb_orphan_try(skb);
 
+        features = netif_skb_features(skb);
+
         if (vlan_tx_tag_present(skb) &&
-            !(dev->features & NETIF_F_HW_VLAN_TX)) {
+            !(features & NETIF_F_HW_VLAN_TX)) {
             skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
             if (unlikely(!skb))
                 goto out;
@@ -2088,13 +2084,13 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
             skb->vlan_tci = 0;
         }
 
-        if (netif_needs_gso(dev, skb)) {
-            if (unlikely(dev_gso_segment(skb)))
+        if (netif_needs_gso(skb, features)) {
+            if (unlikely(dev_gso_segment(skb, features)))
                 goto out_kfree_skb;
             if (skb->next)
                 goto gso;
         } else {
-            if (skb_needs_linearize(skb, dev) &&
+            if (skb_needs_linearize(skb, features) &&
                 __skb_linearize(skb))
                 goto out_kfree_skb;
@@ -2105,7 +2101,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
             if (skb->ip_summed == CHECKSUM_PARTIAL) {
                 skb_set_transport_header(skb,
                     skb_checksum_start_offset(skb));
-                if (!dev_can_checksum(dev, skb) &&
+                if (!(features & NETIF_F_ALL_CSUM) &&
                     skb_checksum_help(skb))
                     goto out_kfree_skb;
             }
@@ -2301,7 +2297,10 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
      */
     if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
         skb_dst_force(skb);
-    __qdisc_update_bstats(q, skb->len);
+
+    qdisc_skb_cb(skb)->pkt_len = skb->len;
+    qdisc_bstats_update(q, skb);
+
     if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
         if (unlikely(contended)) {
             spin_unlock(&q->busylock);
@@ -5621,18 +5620,20 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
 }
 
 /**
- * alloc_netdev_mq - allocate network device
+ * alloc_netdev_mqs - allocate network device
  * @sizeof_priv: size of private data to allocate space for
  * @name: device name format string
  * @setup: callback to initialize device
- * @queue_count: the number of subqueues to allocate
+ * @txqs: the number of TX subqueues to allocate
+ * @rxqs: the number of RX subqueues to allocate
  *
  * Allocates a struct net_device with private data area for driver use
- * and performs basic initialization.  Also allocates subquue structs
- * for each queue on the device at the end of the netdevice.
+ * and performs basic initialization.  Also allocates subquue structs
+ * for each queue on the device.
  */
-struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
-        void (*setup)(struct net_device *), unsigned int queue_count)
+struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+        void (*setup)(struct net_device *),
+        unsigned int txqs, unsigned int rxqs)
 {
     struct net_device *dev;
     size_t alloc_size;
@@ -5640,12 +5641,20 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
     BUG_ON(strlen(name) >= sizeof(dev->name));
 
-    if (queue_count < 1) {
+    if (txqs < 1) {
         pr_err("alloc_netdev: Unable to allocate device "
                "with zero queues.\n");
         return NULL;
     }
 
+#ifdef CONFIG_RPS
+    if (rxqs < 1) {
+        pr_err("alloc_netdev: Unable to allocate device "
+               "with zero RX queues.\n");
+        return NULL;
+    }
+#endif
+
     alloc_size = sizeof(struct net_device);
     if (sizeof_priv) {
         /* ensure 32-byte alignment of private area */
@@ -5676,14 +5685,14 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
     dev_net_set(dev, &init_net);
 
-    dev->num_tx_queues = queue_count;
-    dev->real_num_tx_queues = queue_count;
+    dev->num_tx_queues = txqs;
+    dev->real_num_tx_queues = txqs;
     if (netif_alloc_netdev_queues(dev))
         goto free_pcpu;
 
 #ifdef CONFIG_RPS
-    dev->num_rx_queues = queue_count;
-    dev->real_num_rx_queues = queue_count;
+    dev->num_rx_queues = rxqs;
+    dev->real_num_rx_queues = rxqs;
     if (netif_alloc_rx_queues(dev))
         goto free_pcpu;
 #endif
@@ -5711,7 +5720,7 @@ free_p:
     kfree(p);
     return NULL;
 }
-EXPORT_SYMBOL(alloc_netdev_mq);
+EXPORT_SYMBOL(alloc_netdev_mqs);
 
 /**
  * free_netdev - free network device

View File

@@ -158,7 +158,7 @@ EXPORT_SYMBOL(sk_filter);
 /**
  *  sk_run_filter - run a filter on a socket
  *  @skb: buffer to run the filter on
- *  @filter: filter to apply
+ *  @fentry: filter to apply
  *
  *  Decode and apply filter instructions to the skb->data.
  *  Return length to keep, 0 for none. @skb is the data we are

View File

@@ -1820,7 +1820,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
     if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN))
         return -EPERM;
 
-    if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
+    if (kind == 2 && (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
         struct sock *rtnl;
         rtnl_dumpit_func dumpit;
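
The stricter test matters because NLM_F_DUMP is a two-bit mask, NLM_F_ROOT | NLM_F_MATCH, so "flags & NLM_F_DUMP" is already true when only one of the two bits is set. A small demo using the flag values from linux/netlink.h:

#include <stdio.h>

#define NLM_F_ROOT  0x100
#define NLM_F_MATCH 0x200
#define NLM_F_DUMP  (NLM_F_ROOT | NLM_F_MATCH)

int main(void)
{
    unsigned int flags = NLM_F_ROOT;  /* only half of the DUMP mask */

    printf("loose : %d\n", !!(flags & NLM_F_DUMP));             /* 1: would start a dump */
    printf("strict: %d\n", (flags & NLM_F_DUMP) == NLM_F_DUMP); /* 0: correctly rejected */
    return 0;
}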

View File

@@ -426,7 +426,8 @@ static inline void dccp_update_gsr(struct sock *sk, u64 seq)
 {
     struct dccp_sock *dp = dccp_sk(sk);
 
-    dp->dccps_gsr = seq;
+    if (after48(seq, dp->dccps_gsr))
+        dp->dccps_gsr = seq;
     /* Sequence validity window depends on remote Sequence Window (7.5.1) */
     dp->dccps_swl = SUB48(ADD48(dp->dccps_gsr, 1), dp->dccps_r_seq_win / 4);
     /*
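
The after48() guard stops an out-of-order packet from moving GSR backwards: DCCP sequence numbers are 48 bits wide and wrap, so ordering is decided by the sign of the circular distance. A stand-alone sketch in the spirit of the kernel's 48-bit delta comparison (assumed implementation detail; relies on two's-complement conversion and an arithmetic right shift, as kernel code conventionally does):

#include <stdio.h>
#include <stdint.h>

/* Circular 48-bit distance: shift both values into the top 48 bits of a
 * 64-bit word so the subtraction sign-extends on the arithmetic shift. */
static int64_t delta_seqno48(uint64_t s1, uint64_t s2)
{
    return ((int64_t)((s2 << 16) - (s1 << 16))) >> 16;
}

static int after48(uint64_t s1, uint64_t s2)  /* is s1 newer than s2? */
{
    return delta_seqno48(s2, s1) > 0;
}

int main(void)
{
    uint64_t gsr = 0xFFFFFFFFFFFFULL;  /* 2^48 - 1, about to wrap */

    printf("%d\n", after48(2, gsr));        /* 1: wrapped around, still newer */
    printf("%d\n", after48(gsr - 5, gsr));  /* 0: genuinely older */
    return 0;
}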

View File

@@ -260,7 +260,7 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
          */
         if (time_before(now, (dp->dccps_rate_last +
                       sysctl_dccp_sync_ratelimit)))
-            return 0;
+            return -1;
 
         DCCP_WARN("Step 6 failed for %s packet, "
               "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "

View File

@@ -21,7 +21,8 @@
 /* Boundary values */
 static int      zero     = 0,
                 u8_max   = 0xFF;
-static unsigned long seqw_min = 32;
+static unsigned long seqw_min = DCCPF_SEQ_WMIN,
+                     seqw_max = 0xFFFFFFFF; /* maximum on 32 bit */
 
 static struct ctl_table dccp_default_table[] = {
     {
@@ -31,6 +32,7 @@ static struct ctl_table dccp_default_table[] = {
         .mode           = 0644,
         .proc_handler   = proc_doulongvec_minmax,
         .extra1         = &seqw_min,            /* RFC 4340, 7.5.2 */
+        .extra2         = &seqw_max,
     },
     {
         .procname       = "rx_ccid",

View File

@@ -347,10 +347,11 @@ void ether_setup(struct net_device *dev)
 EXPORT_SYMBOL(ether_setup);
 
 /**
- * alloc_etherdev_mq - Allocates and sets up an Ethernet device
+ * alloc_etherdev_mqs - Allocates and sets up an Ethernet device
  * @sizeof_priv: Size of additional driver-private structure to be allocated
  *    for this Ethernet device
- * @queue_count: The number of queues this device has.
+ * @txqs: The number of TX queues this device has.
+ * @txqs: The number of RX queues this device has.
  *
  * Fill in the fields of the device structure with Ethernet-generic
  * values. Basically does everything except registering the device.
@@ -360,11 +361,12 @@ EXPORT_SYMBOL(ether_setup);
  * this private data area.
  */
 
-struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count)
+struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
+                      unsigned int rxqs)
 {
-    return alloc_netdev_mq(sizeof_priv, "eth%d", ether_setup, queue_count);
+    return alloc_netdev_mqs(sizeof_priv, "eth%d", ether_setup, txqs, rxqs);
 }
-EXPORT_SYMBOL(alloc_etherdev_mq);
+EXPORT_SYMBOL(alloc_etherdev_mqs);
 
 static size_t _format_mac_addr(char *buf, int buflen,
                    const unsigned char *addr, int len)

View File

@@ -314,14 +314,15 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
 
     skb->ip_summed = CHECKSUM_NONE;
 
-    ah = (struct ip_auth_hdr *)skb->data;
-    iph = ip_hdr(skb);
-    ihl = ip_hdrlen(skb);
-
     if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
         goto out;
     nfrags = err;
 
+    ah = (struct ip_auth_hdr *)skb->data;
+    iph = ip_hdr(skb);
+    ihl = ip_hdrlen(skb);
+
     work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len);
     if (!work_iph)
         goto out;

View File

@@ -1143,6 +1143,23 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
     return err;
 }
 
+int arp_invalidate(struct net_device *dev, __be32 ip)
+{
+    struct neighbour *neigh = neigh_lookup(&arp_tbl, &ip, dev);
+    int err = -ENXIO;
+
+    if (neigh) {
+        if (neigh->nud_state & ~NUD_NOARP)
+            err = neigh_update(neigh, NULL, NUD_FAILED,
+                       NEIGH_UPDATE_F_OVERRIDE|
+                       NEIGH_UPDATE_F_ADMIN);
+        neigh_release(neigh);
+    }
+
+    return err;
+}
+EXPORT_SYMBOL(arp_invalidate);
+
 static int arp_req_delete_public(struct net *net, struct arpreq *r,
         struct net_device *dev)
 {
@@ -1163,7 +1180,6 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
 {
     int err;
     __be32 ip;
-    struct neighbour *neigh;
 
     if (r->arp_flags & ATF_PUBL)
         return arp_req_delete_public(net, r, dev);
@@ -1181,16 +1197,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
         if (!dev)
             return -EINVAL;
     }
-    err = -ENXIO;
-    neigh = neigh_lookup(&arp_tbl, &ip, dev);
-    if (neigh) {
-        if (neigh->nud_state & ~NUD_NOARP)
-            err = neigh_update(neigh, NULL, NUD_FAILED,
-                       NEIGH_UPDATE_F_OVERRIDE|
-                       NEIGH_UPDATE_F_ADMIN);
-        neigh_release(neigh);
-    }
-    return err;
+    return arp_invalidate(dev, ip);
 }
 
 /*

View File

@@ -73,7 +73,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
              !sk2->sk_bound_dev_if ||
              sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
             if (!reuse || !sk2->sk_reuse ||
-                sk2->sk_state == TCP_LISTEN) {
+                ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) {
                 const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
                 if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
                     sk2_rcv_saddr == sk_rcv_saddr(sk))
@@ -122,7 +122,8 @@ again:
                 (tb->num_owners < smallest_size || smallest_size == -1)) {
                 smallest_size = tb->num_owners;
                 smallest_rover = rover;
-                if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
+                if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
+                    !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
                     spin_unlock(&head->lock);
                     snum = smallest_rover;
                     goto have_snum;
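
The TCPF_* constants are the one-bit-per-state forms of the TCP_* state numbers, so "(1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)" tests membership in a set of states with a single mask operation. A compact demo with the state values from net/tcp_states.h:

#include <stdio.h>

/* State numbers as in net/tcp_states.h; TCPF_* is 1 << TCP_*. */
enum { TCP_ESTABLISHED = 1, TCP_CLOSE = 7, TCP_LISTEN = 10 };

#define TCPF_CLOSE  (1 << TCP_CLOSE)
#define TCPF_LISTEN (1 << TCP_LISTEN)

static int blocks_reuse(int state)
{
    return !!((1 << state) & (TCPF_LISTEN | TCPF_CLOSE));
}

int main(void)
{
    printf("LISTEN=%d CLOSE=%d ESTABLISHED=%d\n",
           blocks_reuse(TCP_LISTEN),       /* 1 */
           blocks_reuse(TCP_CLOSE),        /* 1 */
           blocks_reuse(TCP_ESTABLISHED)); /* 0 */
    return 0;
}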

View File

@@ -858,7 +858,7 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
         nlmsg_len(nlh) < hdrlen)
         return -EINVAL;
 
-    if (nlh->nlmsg_flags & NLM_F_DUMP) {
+    if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
         if (nlmsg_attrlen(nlh, hdrlen)) {
             struct nlattr *attr;

View File

@@ -710,42 +710,25 @@ static void get_counters(const struct xt_table_info *t,
     struct arpt_entry *iter;
     unsigned int cpu;
     unsigned int i;
-    unsigned int curcpu = get_cpu();
-
-    /* Instead of clearing (by a previous call to memset())
-     * the counters and using adds, we set the counters
-     * with data used by 'current' CPU
-     *
-     * Bottom half has to be disabled to prevent deadlock
-     * if new softirq were to run and call ipt_do_table
-     */
-    local_bh_disable();
-    i = 0;
-    xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-        SET_COUNTER(counters[i], iter->counters.bcnt,
-                iter->counters.pcnt);
-        ++i;
-    }
-    local_bh_enable();
-    /* Processing counters from other cpus, we can let bottom half enabled,
-     * (preemption is disabled)
-     */
 
     for_each_possible_cpu(cpu) {
-        if (cpu == curcpu)
-            continue;
+        seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
         i = 0;
-        local_bh_disable();
-        xt_info_wrlock(cpu);
         xt_entry_foreach(iter, t->entries[cpu], t->size) {
-            ADD_COUNTER(counters[i], iter->counters.bcnt,
-                    iter->counters.pcnt);
+            u64 bcnt, pcnt;
+            unsigned int start;
+
+            do {
+                start = read_seqbegin(lock);
+                bcnt = iter->counters.bcnt;
+                pcnt = iter->counters.pcnt;
+            } while (read_seqretry(lock, start));
+
+            ADD_COUNTER(counters[i], bcnt, pcnt);
             ++i;
         }
-        xt_info_wrunlock(cpu);
-        local_bh_enable();
     }
-    put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -759,7 +742,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
      * about).
      */
     countersize = sizeof(struct xt_counters) * private->number;
-    counters = vmalloc(countersize);
+    counters = vzalloc(countersize);
 
     if (counters == NULL)
         return ERR_PTR(-ENOMEM);
@@ -1007,7 +990,7 @@ static int __do_replace(struct net *net, const char *name,
     struct arpt_entry *iter;
 
     ret = 0;
-    counters = vmalloc(num_counters * sizeof(struct xt_counters));
+    counters = vzalloc(num_counters * sizeof(struct xt_counters));
     if (!counters) {
         ret = -ENOMEM;
         goto out;

View File

@@ -884,42 +884,25 @@ get_counters(const struct xt_table_info *t,
     struct ipt_entry *iter;
     unsigned int cpu;
     unsigned int i;
-    unsigned int curcpu = get_cpu();
-
-    /* Instead of clearing (by a previous call to memset())
-     * the counters and using adds, we set the counters
-     * with data used by 'current' CPU.
-     *
-     * Bottom half has to be disabled to prevent deadlock
-     * if new softirq were to run and call ipt_do_table
-     */
-    local_bh_disable();
-    i = 0;
-    xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-        SET_COUNTER(counters[i], iter->counters.bcnt,
-                iter->counters.pcnt);
-        ++i;
-    }
-    local_bh_enable();
-    /* Processing counters from other cpus, we can let bottom half enabled,
-     * (preemption is disabled)
-     */
 
     for_each_possible_cpu(cpu) {
-        if (cpu == curcpu)
-            continue;
+        seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
         i = 0;
-        local_bh_disable();
-        xt_info_wrlock(cpu);
         xt_entry_foreach(iter, t->entries[cpu], t->size) {
-            ADD_COUNTER(counters[i], iter->counters.bcnt,
-                    iter->counters.pcnt);
+            u64 bcnt, pcnt;
+            unsigned int start;
+
+            do {
+                start = read_seqbegin(lock);
+                bcnt = iter->counters.bcnt;
+                pcnt = iter->counters.pcnt;
+            } while (read_seqretry(lock, start));
+
+            ADD_COUNTER(counters[i], bcnt, pcnt);
             ++i; /* macro does multi eval of i */
         }
-        xt_info_wrunlock(cpu);
-        local_bh_enable();
     }
-    put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -932,7 +915,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
        (other than comefrom, which userspace doesn't care
        about). */
     countersize = sizeof(struct xt_counters) * private->number;
-    counters = vmalloc(countersize);
+    counters = vzalloc(countersize);
 
     if (counters == NULL)
         return ERR_PTR(-ENOMEM);
@@ -1203,7 +1186,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
     struct ipt_entry *iter;
 
     ret = 0;
-    counters = vmalloc(num_counters * sizeof(struct xt_counters));
+    counters = vzalloc(num_counters * sizeof(struct xt_counters));
     if (!counters) {
         ret = -ENOMEM;
         goto out;

View File

@@ -538,14 +538,16 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
     if (!pskb_may_pull(skb, ah_hlen))
         goto out;
 
-    ip6h = ipv6_hdr(skb);
-
-    skb_push(skb, hdr_len);
-
     if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
         goto out;
     nfrags = err;
 
+    ah = (struct ip_auth_hdr *)skb->data;
+    ip6h = ipv6_hdr(skb);
+
+    skb_push(skb, hdr_len);
+
     work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len);
     if (!work_iph)
         goto out;

View File

@@ -44,7 +44,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
              !sk2->sk_bound_dev_if ||
              sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
             (!sk->sk_reuse || !sk2->sk_reuse ||
-             sk2->sk_state == TCP_LISTEN) &&
+             ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) &&
              ipv6_rcv_saddr_equal(sk, sk2))
             break;
     }

View File

@@ -897,42 +897,25 @@ get_counters(const struct xt_table_info *t,
     struct ip6t_entry *iter;
     unsigned int cpu;
     unsigned int i;
-    unsigned int curcpu = get_cpu();
-
-    /* Instead of clearing (by a previous call to memset())
-     * the counters and using adds, we set the counters
-     * with data used by 'current' CPU
-     *
-     * Bottom half has to be disabled to prevent deadlock
-     * if new softirq were to run and call ipt_do_table
-     */
-    local_bh_disable();
-    i = 0;
-    xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-        SET_COUNTER(counters[i], iter->counters.bcnt,
-                iter->counters.pcnt);
-        ++i;
-    }
-    local_bh_enable();
-    /* Processing counters from other cpus, we can let bottom half enabled,
-     * (preemption is disabled)
-     */
 
     for_each_possible_cpu(cpu) {
-        if (cpu == curcpu)
-            continue;
+        seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
         i = 0;
-        local_bh_disable();
-        xt_info_wrlock(cpu);
         xt_entry_foreach(iter, t->entries[cpu], t->size) {
-            ADD_COUNTER(counters[i], iter->counters.bcnt,
-                    iter->counters.pcnt);
+            u64 bcnt, pcnt;
+            unsigned int start;
+
+            do {
+                start = read_seqbegin(lock);
+                bcnt = iter->counters.bcnt;
+                pcnt = iter->counters.pcnt;
+            } while (read_seqretry(lock, start));
+
+            ADD_COUNTER(counters[i], bcnt, pcnt);
             ++i;
         }
-        xt_info_wrunlock(cpu);
-        local_bh_enable();
     }
-    put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -945,7 +928,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
        (other than comefrom, which userspace doesn't care
        about). */
     countersize = sizeof(struct xt_counters) * private->number;
-    counters = vmalloc(countersize);
+    counters = vzalloc(countersize);
 
     if (counters == NULL)
         return ERR_PTR(-ENOMEM);
@@ -1216,7 +1199,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
     struct ip6t_entry *iter;
 
     ret = 0;
-    counters = vmalloc(num_counters * sizeof(struct xt_counters));
+    counters = vzalloc(num_counters * sizeof(struct xt_counters));
     if (!counters) {
         ret = -ENOMEM;
         goto out;

View File

@@ -645,25 +645,23 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
     struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
     u_int8_t l3proto = nfmsg->nfgen_family;
 
-    rcu_read_lock();
+    spin_lock_bh(&nf_conntrack_lock);
     last = (struct nf_conn *)cb->args[1];
     for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
 restart:
-        hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]],
-                     hnnode) {
+        hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
+                       hnnode) {
             if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
                 continue;
             ct = nf_ct_tuplehash_to_ctrack(h);
-            if (!atomic_inc_not_zero(&ct->ct_general.use))
-                continue;
             /* Dump entries of a given L3 protocol number.
              * If it is not specified, ie. l3proto == 0,
              * then dump everything. */
             if (l3proto && nf_ct_l3num(ct) != l3proto)
-                goto releasect;
+                continue;
             if (cb->args[1]) {
                 if (ct != last)
-                    goto releasect;
+                    continue;
                 cb->args[1] = 0;
             }
             if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
@@ -681,8 +679,6 @@ restart:
                 if (acct)
                     memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
             }
-releasect:
-        nf_ct_put(ct);
         }
         if (cb->args[1]) {
             cb->args[1] = 0;
@@ -690,7 +686,7 @@ releasect:
         }
     }
 out:
-    rcu_read_unlock();
+    spin_unlock_bh(&nf_conntrack_lock);
     if (last)
         nf_ct_put(last);
 
@@ -928,7 +924,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
     u16 zone;
     int err;
 
-    if (nlh->nlmsg_flags & NLM_F_DUMP)
+    if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP)
         return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
                       ctnetlink_done);
 
@@ -1790,7 +1786,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
     u16 zone;
     int err;
 
-    if (nlh->nlmsg_flags & NLM_F_DUMP) {
+    if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
         return netlink_dump_start(ctnl, skb, nlh,
                       ctnetlink_exp_dump_table,
                       ctnetlink_exp_done);

View File

@@ -1325,7 +1325,8 @@ static int __init xt_init(void)
 
     for_each_possible_cpu(i) {
         struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
-        spin_lock_init(&lock->lock);
+
+        seqlock_init(&lock->lock);
         lock->readers = 0;
     }

View File

@@ -519,7 +519,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
         security_netlink_recv(skb, CAP_NET_ADMIN))
         return -EPERM;
 
-    if (nlh->nlmsg_flags & NLM_F_DUMP) {
+    if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
         if (ops->dumpit == NULL)
             return -EOPNOTSUPP;

View File

@@ -37,7 +37,7 @@
 /* Transport protocol registration */
 static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly;
 
-static struct phonet_protocol *phonet_proto_get(int protocol)
+static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
 {
     struct phonet_protocol *pp;
 
@@ -458,7 +458,7 @@ static struct packet_type phonet_packet_type __read_mostly = {
 
 static DEFINE_MUTEX(proto_tab_lock);
 
-int __init_or_module phonet_proto_register(int protocol,
+int __init_or_module phonet_proto_register(unsigned int protocol,
         struct phonet_protocol *pp)
 {
     int err = 0;
@@ -481,7 +481,7 @@ int __init_or_module phonet_proto_register(int protocol,
 }
 EXPORT_SYMBOL(phonet_proto_register);
 
-void phonet_proto_unregister(int protocol, struct phonet_protocol *pp)
+void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp)
 {
     mutex_lock(&proto_tab_lock);
     BUG_ON(proto_tab[protocol] != pp);

View File

@@ -508,8 +508,7 @@ static int tcf_csum(struct sk_buff *skb,
 
     spin_lock(&p->tcf_lock);
     p->tcf_tm.lastuse = jiffies;
-    p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    p->tcf_bstats.packets++;
+    bstats_update(&p->tcf_bstats, skb);
     action = p->tcf_action;
     update_flags = p->update_flags;
     spin_unlock(&p->tcf_lock);

View File

@@ -209,8 +209,7 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
     spin_lock(&ipt->tcf_lock);
 
     ipt->tcf_tm.lastuse = jiffies;
-    ipt->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    ipt->tcf_bstats.packets++;
+    bstats_update(&ipt->tcf_bstats, skb);
 
     /* yes, we have to worry about both in and out dev
      worry later - danger - this API seems to have changed

View File

@@ -165,8 +165,7 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
 
     spin_lock(&m->tcf_lock);
     m->tcf_tm.lastuse = jiffies;
-    m->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    m->tcf_bstats.packets++;
+    bstats_update(&m->tcf_bstats, skb);
 
     dev = m->tcfm_dev;
     if (!dev) {

View File

@@ -125,8 +125,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
     egress = p->flags & TCA_NAT_FLAG_EGRESS;
     action = p->tcf_action;
 
-    p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    p->tcf_bstats.packets++;
+    bstats_update(&p->tcf_bstats, skb);
 
     spin_unlock(&p->tcf_lock);

View File

@@ -187,8 +187,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 bad:
     p->tcf_qstats.overlimits++;
 done:
-    p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    p->tcf_bstats.packets++;
+    bstats_update(&p->tcf_bstats, skb);
     spin_unlock(&p->tcf_lock);
     return p->tcf_action;
 }

View File

@@ -298,8 +298,7 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
 
     spin_lock(&police->tcf_lock);
 
-    police->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    police->tcf_bstats.packets++;
+    bstats_update(&police->tcf_bstats, skb);
 
     if (police->tcfp_ewma_rate &&
         police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {

View File

@@ -42,8 +42,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result
 
     spin_lock(&d->tcf_lock);
     d->tcf_tm.lastuse = jiffies;
-    d->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    d->tcf_bstats.packets++;
+    bstats_update(&d->tcf_bstats, skb);
 
     /* print policy string followed by _ then packet count
      * Example if this was the 3rd packet and the string was "hello"

View File

@@ -46,8 +46,7 @@ static int tcf_skbedit(struct sk_buff *skb, struct tc_action *a,
 
     spin_lock(&d->tcf_lock);
     d->tcf_tm.lastuse = jiffies;
-    d->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    d->tcf_bstats.packets++;
+    bstats_update(&d->tcf_bstats, skb);
 
     if (d->flags & SKBEDIT_F_PRIORITY)
         skb->priority = d->priority;

View File

@@ -422,10 +422,8 @@ drop: __maybe_unused
         }
         return ret;
     }
-    sch->bstats.bytes += qdisc_pkt_len(skb);
-    sch->bstats.packets++;
-    flow->bstats.bytes += qdisc_pkt_len(skb);
-    flow->bstats.packets++;
+    qdisc_bstats_update(sch, skb);
+    bstats_update(&flow->bstats, skb);
     /*
      * Okay, this may seem weird. We pretend we've dropped the packet if
      * it goes via ATM. The reason for this is that the outer qdisc

View File

@@ -390,8 +390,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
     ret = qdisc_enqueue(skb, cl->q);
     if (ret == NET_XMIT_SUCCESS) {
         sch->q.qlen++;
-        sch->bstats.packets++;
-        sch->bstats.bytes += qdisc_pkt_len(skb);
+        qdisc_bstats_update(sch, skb);
         cbq_mark_toplevel(q, cl);
         if (!cl->next_alive)
             cbq_activate_class(cl);
@@ -650,8 +649,7 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
         ret = qdisc_enqueue(skb, cl->q);
         if (ret == NET_XMIT_SUCCESS) {
             sch->q.qlen++;
-            sch->bstats.packets++;
-            sch->bstats.bytes += qdisc_pkt_len(skb);
+            qdisc_bstats_update(sch, skb);
             if (!cl->next_alive)
                 cbq_activate_class(cl);
             return 0;

View File

@@ -351,7 +351,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
     struct drr_sched *q = qdisc_priv(sch);
     struct drr_class *cl;
-    unsigned int len;
     int err;
 
     cl = drr_classify(skb, sch, &err);
@@ -362,7 +361,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         return err;
     }
 
-    len = qdisc_pkt_len(skb);
     err = qdisc_enqueue(skb, cl->qdisc);
     if (unlikely(err != NET_XMIT_SUCCESS)) {
         if (net_xmit_drop_count(err)) {
@@ -377,10 +375,8 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         cl->deficit = cl->quantum;
     }
 
-    cl->bstats.packets++;
-    cl->bstats.bytes += len;
-    sch->bstats.packets++;
-    sch->bstats.bytes += len;
+    bstats_update(&cl->bstats, skb);
+    qdisc_bstats_update(sch, skb);
     sch->q.qlen++;
     return err;

View File

@@ -260,8 +260,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         return err;
     }
 
-    sch->bstats.bytes += qdisc_pkt_len(skb);
-    sch->bstats.packets++;
+    qdisc_bstats_update(sch, skb);
     sch->q.qlen++;
 
     return NET_XMIT_SUCCESS;

View File

@@ -1599,10 +1599,8 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
     if (cl->qdisc->q.qlen == 1)
         set_active(cl, qdisc_pkt_len(skb));
 
-    cl->bstats.packets++;
-    cl->bstats.bytes += qdisc_pkt_len(skb);
-    sch->bstats.packets++;
-    sch->bstats.bytes += qdisc_pkt_len(skb);
+    bstats_update(&cl->bstats, skb);
+    qdisc_bstats_update(sch, skb);
     sch->q.qlen++;
 
     return NET_XMIT_SUCCESS;

View File

@@ -569,15 +569,12 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         }
         return ret;
     } else {
-        cl->bstats.packets +=
-            skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-        cl->bstats.bytes += qdisc_pkt_len(skb);
+        bstats_update(&cl->bstats, skb);
         htb_activate(q, cl);
     }
 
     sch->q.qlen++;
-    sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-    sch->bstats.bytes += qdisc_pkt_len(skb);
+    qdisc_bstats_update(sch, skb);
     return NET_XMIT_SUCCESS;
 }
 
@@ -648,12 +645,10 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
             htb_add_to_wait_tree(q, cl, diff);
         }
 
-        /* update byte stats except for leaves which are already updated */
-        if (cl->level) {
-            cl->bstats.bytes += bytes;
-            cl->bstats.packets += skb_is_gso(skb)?
-                    skb_shinfo(skb)->gso_segs:1;
-        }
+        /* update basic stats except for leaves which are already updated */
+        if (cl->level)
+            bstats_update(&cl->bstats, skb);
         cl = cl->parent;
     }
 }

Some files were not shown because too many files have changed in this diff.