Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (42 commits)
  b44: Fix wedge when using netconsole.
  wan: cosa: drop chan->wsem on error path
  ep93xx-eth: check for zero MAC address on probe, not on device open
  NET: smc91x: Fix irq flags
  smsc9420: prevent BUG() if ethtool is called with interface down
  r8169: restore mac addr in rtl8169_remove_one and rtl_shutdown
  ipv4: additional update of dev_net(dev) to struct *net in ip_fragment.c, NULL ptr OOPS
  e100: Use pci pool to work around GFP_ATOMIC order 5 memory allocation failure
  sctp: on T3_RTX retransmit all the in-flight chunks
  pktgen: Fix netdevice unregister
  macvlan: fix gso_max_size setting
  rfkill: fix miscdev ops
  ath9k: set ps_default as false
  hso: fix soft-lockup
  hso: fix debug routines
  pktgen: Fix device name compares
  stmmac: do not fail when the timer cannot be used.
  stmmac: fixed a compilation error when use the external timer
  netfilter: xt_limit: fix invalid return code in limit_mt_check()
  Au1x00: fix crash when trying register_netdev()
  ...
commit cd79bf7b1c
@@ -260,15 +260,12 @@ static int ieee802154_fake_close(struct net_device *dev)
static netdev_tx_t ieee802154_fake_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	skb->iif = dev->ifindex;
	skb->dev = dev;
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	dev->trans_start = jiffies;

	/* FIXME: do hardware work here ... */

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
@@ -975,7 +975,7 @@ config ENC28J60_WRITEVERIFY

config ETHOC
	tristate "OpenCores 10/100 Mbps Ethernet MAC support"
	depends on NET_ETHERNET && HAS_IOMEM
	depends on NET_ETHERNET && HAS_IOMEM && HAS_DMA
	select MII
	select PHYLIB
	select CRC32
@@ -628,15 +628,6 @@ static int ep93xx_open(struct net_device *dev)
	if (ep93xx_alloc_buffers(ep))
		return -ENOMEM;

	if (is_zero_ether_addr(dev->dev_addr)) {
		random_ether_addr(dev->dev_addr);
		printk(KERN_INFO "%s: generated random MAC address "
			"%.2x:%.2x:%.2x:%.2x:%.2x:%.2x.\n", dev->name,
			dev->dev_addr[0], dev->dev_addr[1],
			dev->dev_addr[2], dev->dev_addr[3],
			dev->dev_addr[4], dev->dev_addr[5]);
	}

	napi_enable(&ep->napi);

	if (ep93xx_start_hw(dev)) {

@@ -877,6 +868,9 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
	ep->mii.mdio_write = ep93xx_mdio_write;
	ep->mdc_divisor = 40; /* Max HCLK 100 MHz, min MDIO clk 2.5 MHz. */

	if (is_zero_ether_addr(dev->dev_addr))
		random_ether_addr(dev->dev_addr);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to register netdev\n");
@@ -1088,7 +1088,14 @@ static struct net_device * au1000_probe(int port_num)
		return NULL;
	}

	if ((err = register_netdev(dev)) != 0) {
	dev->base_addr = base;
	dev->irq = irq;
	dev->netdev_ops = &au1000_netdev_ops;
	SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
	dev->watchdog_timeo = ETH_TX_TIMEOUT;

	err = register_netdev(dev);
	if (err != 0) {
		printk(KERN_ERR "%s: Cannot register net device, error %d\n",
				DRV_NAME, err);
		free_netdev(dev);

@@ -1209,12 +1216,6 @@ static struct net_device * au1000_probe(int port_num)
		aup->tx_db_inuse[i] = pDB;
	}

	dev->base_addr = base;
	dev->irq = irq;
	dev->netdev_ops = &au1000_netdev_ops;
	SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
	dev->watchdog_timeo = ETH_TX_TIMEOUT;

	/*
	 * The boot code uses the ethernet controller, so reset it to start
	 * fresh. au1000_init() expects that the device is in reset state.
@@ -912,9 +912,6 @@ static irqreturn_t b44_interrupt(int irq, void *dev_id)
		bp->istat = istat;
		__b44_disable_ints(bp);
		__napi_schedule(&bp->napi);
	} else {
		printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
			dev->name);
	}

irq_ack:
@@ -164,16 +164,14 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
# define EMAC_MBP_MCASTCHAN(ch) ((ch) & 0x7)

/* EMAC mac_control register */
#define EMAC_MACCONTROL_TXPTYPE (0x200)
#define EMAC_MACCONTROL_TXPACEEN (0x40)
#define EMAC_MACCONTROL_MIIEN (0x20)
#define EMAC_MACCONTROL_GIGABITEN (0x80)
#define EMAC_MACCONTROL_GIGABITEN_SHIFT (7)
#define EMAC_MACCONTROL_FULLDUPLEXEN (0x1)
#define EMAC_MACCONTROL_TXPTYPE BIT(9)
#define EMAC_MACCONTROL_TXPACEEN BIT(6)
#define EMAC_MACCONTROL_GMIIEN BIT(5)
#define EMAC_MACCONTROL_GIGABITEN BIT(7)
#define EMAC_MACCONTROL_FULLDUPLEXEN BIT(0)
#define EMAC_MACCONTROL_RMIISPEED_MASK BIT(15)

/* GIGABIT MODE related bits */
#define EMAC_DM646X_MACCONTORL_GMIIEN BIT(5)
#define EMAC_DM646X_MACCONTORL_GIG BIT(7)
#define EMAC_DM646X_MACCONTORL_GIGFORCE BIT(17)

@@ -192,10 +190,10 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_RX_BUFFER_OFFSET_MASK (0xFFFF)

/* MAC_IN_VECTOR (0x180) register bit fields */
#define EMAC_DM644X_MAC_IN_VECTOR_HOST_INT (0x20000)
#define EMAC_DM644X_MAC_IN_VECTOR_STATPEND_INT (0x10000)
#define EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC (0x0100)
#define EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC (0x01)
#define EMAC_DM644X_MAC_IN_VECTOR_HOST_INT BIT(17)
#define EMAC_DM644X_MAC_IN_VECTOR_STATPEND_INT BIT(16)
#define EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC BIT(8)
#define EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC BIT(0)

/** NOTE:: For DM646x the IN_VECTOR has changed */
#define EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC BIT(EMAC_DEF_RX_CH)

@@ -203,7 +201,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DM646X_MAC_IN_VECTOR_HOST_INT BIT(26)
#define EMAC_DM646X_MAC_IN_VECTOR_STATPEND_INT BIT(27)


/* CPPI bit positions */
#define EMAC_CPPI_SOP_BIT BIT(31)
#define EMAC_CPPI_EOP_BIT BIT(30)

@@ -750,8 +747,7 @@ static void emac_update_phystatus(struct emac_priv *priv)

	if (priv->speed == SPEED_1000 && (priv->version == EMAC_VERSION_2)) {
		mac_control = emac_read(EMAC_MACCONTROL);
		mac_control |= (EMAC_DM646X_MACCONTORL_GMIIEN |
				EMAC_DM646X_MACCONTORL_GIG |
		mac_control |= (EMAC_DM646X_MACCONTORL_GIG |
				EMAC_DM646X_MACCONTORL_GIGFORCE);
	} else {
		/* Clear the GIG bit and GIGFORCE bit */

@@ -2108,7 +2104,7 @@ static int emac_hw_enable(struct emac_priv *priv)

	/* Enable MII */
	val = emac_read(EMAC_MACCONTROL);
	val |= (EMAC_MACCONTROL_MIIEN);
	val |= (EMAC_MACCONTROL_GMIIEN);
	emac_write(EMAC_MACCONTROL, val);

	/* Enable NAPI and interrupts */
@@ -157,6 +157,7 @@
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>

@@ -602,6 +603,7 @@ struct nic {
	struct mem *mem;
	dma_addr_t dma_addr;

	struct pci_pool *cbs_pool;
	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;

@@ -1793,9 +1795,7 @@ static void e100_clean_cbs(struct nic *nic)
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_free_consistent(nic->pdev,
			sizeof(struct cb) * nic->params.cbs.count,
			nic->cbs, nic->cbs_dma_addr);
		pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}

@@ -1813,8 +1813,8 @@ static int e100_alloc_cbs(struct nic *nic)
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
	nic->cbs_avail = 0;

	nic->cbs = pci_alloc_consistent(nic->pdev,
		sizeof(struct cb) * count, &nic->cbs_dma_addr);
	nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
		&nic->cbs_dma_addr);
	if (!nic->cbs)
		return -ENOMEM;

@@ -2841,7 +2841,11 @@ static int __devinit e100_probe(struct pci_dev *pdev,
		DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
		goto err_out_free;
	}

	nic->cbs_pool = pci_pool_create(netdev->name,
			nic->pdev,
			nic->params.cbs.count * sizeof(struct cb),
			sizeof(u32),
			0);
	DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
		(unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
		pdev->irq, netdev->dev_addr);

@@ -2871,6 +2875,7 @@ static void __devexit e100_remove(struct pci_dev *pdev)
		unregister_netdev(netdev);
		e100_free(nic);
		pci_iounmap(pdev, nic->csr);
		pci_pool_destroy(nic->cbs_pool);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
@@ -141,6 +141,8 @@ struct e1000_info;
#define HV_TNCRS_UPPER PHY_REG(778, 29) /* Transmit with no CRS */
#define HV_TNCRS_LOWER PHY_REG(778, 30)

#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */

/* BM PHY Copper Specific Status */
#define BM_CS_STATUS 17
#define BM_CS_STATUS_LINK_UP 0x0400
@@ -327,10 +327,18 @@ static int e1000_set_pauseparam(struct net_device *netdev,

		hw->fc.current_mode = hw->fc.requested_mode;

		retval = ((hw->phy.media_type == e1000_media_type_fiber) ?
			hw->mac.ops.setup_link(hw) : e1000e_force_mac_fc(hw));
		if (hw->phy.media_type == e1000_media_type_fiber) {
			retval = hw->mac.ops.setup_link(hw);
			/* implicit goto out */
		} else {
			retval = e1000e_force_mac_fc(hw);
			if (retval)
				goto out;
			e1000e_set_fc_watermarks(hw);
		}
	}

out:
	clear_bit(__E1000_RESETTING, &adapter->state);
	return retval;
}
@@ -1118,7 +1118,8 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
			oem_reg |= HV_OEM_BITS_LPLU;
	}
	/* Restart auto-neg to activate the bits */
	oem_reg |= HV_OEM_BITS_RESTART_AN;
	if (!e1000_check_reset_block(hw))
		oem_reg |= HV_OEM_BITS_RESTART_AN;
	ret_val = hw->phy.ops.write_phy_reg_locked(hw, HV_OEM_BITS, oem_reg);

out:

@@ -3558,6 +3559,7 @@ struct e1000_info e1000_pch_info = {
				| FLAG_HAS_AMT
				| FLAG_HAS_FLASH
				| FLAG_HAS_JUMBO_FRAMES
				| FLAG_DISABLE_FC_PAUSE_TIME /* errata */
				| FLAG_APME_IN_WUC,
	.pba = 26,
	.max_hw_frame_size = 4096,
@ -2769,25 +2769,38 @@ void e1000e_reset(struct e1000_adapter *adapter)
|
|||
/*
|
||||
* flow control settings
|
||||
*
|
||||
* The high water mark must be low enough to fit two full frame
|
||||
* The high water mark must be low enough to fit one full frame
|
||||
* (or the size used for early receive) above it in the Rx FIFO.
|
||||
* Set it to the lower of:
|
||||
* - 90% of the Rx FIFO size, and
|
||||
* - the full Rx FIFO size minus the early receive size (for parts
|
||||
* with ERT support assuming ERT set to E1000_ERT_2048), or
|
||||
* - the full Rx FIFO size minus two full frames
|
||||
* - the full Rx FIFO size minus one full frame
|
||||
*/
|
||||
if ((adapter->flags & FLAG_HAS_ERT) &&
|
||||
(adapter->netdev->mtu > ETH_DATA_LEN))
|
||||
hwm = min(((pba << 10) * 9 / 10),
|
||||
((pba << 10) - (E1000_ERT_2048 << 3)));
|
||||
else
|
||||
hwm = min(((pba << 10) * 9 / 10),
|
||||
((pba << 10) - (2 * adapter->max_frame_size)));
|
||||
if (hw->mac.type == e1000_pchlan) {
|
||||
/*
|
||||
* Workaround PCH LOM adapter hangs with certain network
|
||||
* loads. If hangs persist, try disabling Tx flow control.
|
||||
*/
|
||||
if (adapter->netdev->mtu > ETH_DATA_LEN) {
|
||||
fc->high_water = 0x3500;
|
||||
fc->low_water = 0x1500;
|
||||
} else {
|
||||
fc->high_water = 0x5000;
|
||||
fc->low_water = 0x3000;
|
||||
}
|
||||
} else {
|
||||
if ((adapter->flags & FLAG_HAS_ERT) &&
|
||||
(adapter->netdev->mtu > ETH_DATA_LEN))
|
||||
hwm = min(((pba << 10) * 9 / 10),
|
||||
((pba << 10) - (E1000_ERT_2048 << 3)));
|
||||
else
|
||||
hwm = min(((pba << 10) * 9 / 10),
|
||||
((pba << 10) - adapter->max_frame_size));
|
||||
|
||||
fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
|
||||
fc->low_water = (fc->high_water - (2 * adapter->max_frame_size));
|
||||
fc->low_water &= E1000_FCRTL_RTL; /* 8-byte granularity */
|
||||
fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
|
||||
fc->low_water = fc->high_water - 8;
|
||||
}
|
||||
|
||||
if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
|
||||
fc->pause_time = 0xFFFF;
|
||||
|
@ -2813,6 +2826,10 @@ void e1000e_reset(struct e1000_adapter *adapter)
|
|||
if (mac->ops.init_hw(hw))
|
||||
e_err("Hardware Error\n");
|
||||
|
||||
/* additional part of the flow-control workaround above */
|
||||
if (hw->mac.type == e1000_pchlan)
|
||||
ew32(FCRTV_PCH, 0x1000);
|
||||
|
||||
e1000_update_mng_vlan(adapter);
|
||||
|
||||
/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
|
||||
|
@ -3610,7 +3627,7 @@ static void e1000_watchdog_task(struct work_struct *work)
|
|||
case SPEED_100:
|
||||
txb2b = 0;
|
||||
netdev->tx_queue_len = 100;
|
||||
/* maybe add some timeout factor ? */
|
||||
adapter->tx_timeout_factor = 10;
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -4288,8 +4305,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
|
|||
|
||||
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
|
||||
msleep(1);
|
||||
/* e1000e_down has a dependency on max_frame_size */
|
||||
/* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
|
||||
adapter->max_frame_size = max_frame;
|
||||
e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
|
||||
netdev->mtu = new_mtu;
|
||||
if (netif_running(netdev))
|
||||
e1000e_down(adapter);
|
||||
|
||||
|
@ -4319,9 +4338,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
|
|||
adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
|
||||
+ ETH_FCS_LEN;
|
||||
|
||||
e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
|
||||
netdev->mtu = new_mtu;
|
||||
|
||||
if (netif_running(netdev))
|
||||
e1000e_up(adapter);
|
||||
else
|
||||
|
|
|
@ -71,7 +71,6 @@ static const u16 e1000_igp_2_cable_length_table[] =
|
|||
#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
|
||||
#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
|
||||
#define I82577_CTRL_REG 23
|
||||
#define I82577_CTRL_DOWNSHIFT_MASK (7 << 10)
|
||||
|
||||
/* 82577 specific PHY registers */
|
||||
#define I82577_PHY_CTRL_2 18
|
||||
|
@ -660,15 +659,6 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
|
|||
phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
|
||||
|
||||
ret_val = phy->ops.write_phy_reg(hw, I82577_CFG_REG, phy_data);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
/* Set number of link attempts before downshift */
|
||||
ret_val = phy->ops.read_phy_reg(hw, I82577_CTRL_REG, &phy_data);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
phy_data &= ~I82577_CTRL_DOWNSHIFT_MASK;
|
||||
ret_val = phy->ops.write_phy_reg(hw, I82577_CTRL_REG, phy_data);
|
||||
|
||||
out:
|
||||
return ret_val;
|
||||
|
@ -2658,19 +2648,18 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
|
|||
page = 0;
|
||||
|
||||
if (reg > MAX_PHY_MULTI_PAGE_REG) {
|
||||
if ((hw->phy.type != e1000_phy_82578) ||
|
||||
((reg != I82578_ADDR_REG) &&
|
||||
(reg != I82578_ADDR_REG + 1))) {
|
||||
u32 phy_addr = hw->phy.addr;
|
||||
u32 phy_addr = hw->phy.addr;
|
||||
|
||||
hw->phy.addr = 1;
|
||||
hw->phy.addr = 1;
|
||||
|
||||
/* Page is shifted left, PHY expects (page x 32) */
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw,
|
||||
IGP01E1000_PHY_PAGE_SELECT,
|
||||
(page << IGP_PAGE_SHIFT));
|
||||
hw->phy.addr = phy_addr;
|
||||
}
|
||||
/* Page is shifted left, PHY expects (page x 32) */
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw,
|
||||
IGP01E1000_PHY_PAGE_SELECT,
|
||||
(page << IGP_PAGE_SHIFT));
|
||||
hw->phy.addr = phy_addr;
|
||||
|
||||
if (ret_val)
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
|
||||
|
@ -2678,7 +2667,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
|
|||
out:
|
||||
/* Revert to MDIO fast mode, if applicable */
|
||||
if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
|
||||
ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
|
||||
ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
|
||||
|
||||
if (!locked)
|
||||
hw->phy.ops.release_phy(hw);
|
||||
|
@ -2784,19 +2773,18 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
|
|||
}
|
||||
|
||||
if (reg > MAX_PHY_MULTI_PAGE_REG) {
|
||||
if ((hw->phy.type != e1000_phy_82578) ||
|
||||
((reg != I82578_ADDR_REG) &&
|
||||
(reg != I82578_ADDR_REG + 1))) {
|
||||
u32 phy_addr = hw->phy.addr;
|
||||
u32 phy_addr = hw->phy.addr;
|
||||
|
||||
hw->phy.addr = 1;
|
||||
hw->phy.addr = 1;
|
||||
|
||||
/* Page is shifted left, PHY expects (page x 32) */
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw,
|
||||
IGP01E1000_PHY_PAGE_SELECT,
|
||||
(page << IGP_PAGE_SHIFT));
|
||||
hw->phy.addr = phy_addr;
|
||||
}
|
||||
/* Page is shifted left, PHY expects (page x 32) */
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw,
|
||||
IGP01E1000_PHY_PAGE_SELECT,
|
||||
(page << IGP_PAGE_SHIFT));
|
||||
hw->phy.addr = phy_addr;
|
||||
|
||||
if (ret_val)
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
|
||||
|
@ -2805,7 +2793,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
|
|||
out:
|
||||
/* Revert to MDIO fast mode, if applicable */
|
||||
if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
|
||||
ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
|
||||
ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
|
||||
|
||||
if (!locked)
|
||||
hw->phy.ops.release_phy(hw);
|
||||
|
|
|
@@ -240,11 +240,11 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
				      struct ixgbe_ring *tx_ring)
{
	int tc;
	u32 txoff = IXGBE_TFCS_TXOFF;

#ifdef CONFIG_IXGBE_DCB
	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
		int tc;
		int reg_idx = tx_ring->reg_idx;
		int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
@ -568,6 +568,16 @@ static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
|
|||
iowrite16(*wptr++, ks->hw_addr);
|
||||
}
|
||||
|
||||
static void ks_disable_int(struct ks_net *ks)
|
||||
{
|
||||
ks_wrreg16(ks, KS_IER, 0x0000);
|
||||
} /* ks_disable_int */
|
||||
|
||||
static void ks_enable_int(struct ks_net *ks)
|
||||
{
|
||||
ks_wrreg16(ks, KS_IER, ks->rc_ier);
|
||||
} /* ks_enable_int */
|
||||
|
||||
/**
|
||||
* ks_tx_fifo_space - return the available hardware buffer size.
|
||||
* @ks: The chip information
|
||||
|
@ -681,6 +691,47 @@ static void ks_soft_reset(struct ks_net *ks, unsigned op)
|
|||
}
|
||||
|
||||
|
||||
void ks_enable_qmu(struct ks_net *ks)
|
||||
{
|
||||
u16 w;
|
||||
|
||||
w = ks_rdreg16(ks, KS_TXCR);
|
||||
/* Enables QMU Transmit (TXCR). */
|
||||
ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
|
||||
|
||||
/*
|
||||
* RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
|
||||
* Enable
|
||||
*/
|
||||
|
||||
w = ks_rdreg16(ks, KS_RXQCR);
|
||||
ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
|
||||
|
||||
/* Enables QMU Receive (RXCR1). */
|
||||
w = ks_rdreg16(ks, KS_RXCR1);
|
||||
ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
|
||||
ks->enabled = true;
|
||||
} /* ks_enable_qmu */
|
||||
|
||||
static void ks_disable_qmu(struct ks_net *ks)
|
||||
{
|
||||
u16 w;
|
||||
|
||||
w = ks_rdreg16(ks, KS_TXCR);
|
||||
|
||||
/* Disables QMU Transmit (TXCR). */
|
||||
w &= ~TXCR_TXE;
|
||||
ks_wrreg16(ks, KS_TXCR, w);
|
||||
|
||||
/* Disables QMU Receive (RXCR1). */
|
||||
w = ks_rdreg16(ks, KS_RXCR1);
|
||||
w &= ~RXCR1_RXE ;
|
||||
ks_wrreg16(ks, KS_RXCR1, w);
|
||||
|
||||
ks->enabled = false;
|
||||
|
||||
} /* ks_disable_qmu */
|
||||
|
||||
/**
|
||||
* ks_read_qmu - read 1 pkt data from the QMU.
|
||||
* @ks: The chip information
|
||||
|
@ -752,7 +803,7 @@ static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
|
|||
(frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
|
||||
skb_reserve(skb, 2);
|
||||
/* read data block including CRC 4 bytes */
|
||||
ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len + 4);
|
||||
ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
|
||||
skb_put(skb, frame_hdr->len);
|
||||
skb->dev = netdev;
|
||||
skb->protocol = eth_type_trans(skb, netdev);
|
||||
|
@ -861,7 +912,7 @@ static int ks_net_open(struct net_device *netdev)
|
|||
ks_dbg(ks, "%s - entry\n", __func__);
|
||||
|
||||
/* reset the HW */
|
||||
err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, ks);
|
||||
err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);
|
||||
|
||||
if (err) {
|
||||
printk(KERN_ERR "Failed to request IRQ: %d: %d\n",
|
||||
|
@ -869,6 +920,15 @@ static int ks_net_open(struct net_device *netdev)
|
|||
return err;
|
||||
}
|
||||
|
||||
/* wake up powermode to normal mode */
|
||||
ks_set_powermode(ks, PMECR_PM_NORMAL);
|
||||
mdelay(1); /* wait for normal mode to take effect */
|
||||
|
||||
ks_wrreg16(ks, KS_ISR, 0xffff);
|
||||
ks_enable_int(ks);
|
||||
ks_enable_qmu(ks);
|
||||
netif_start_queue(ks->netdev);
|
||||
|
||||
if (netif_msg_ifup(ks))
|
||||
ks_dbg(ks, "network device %s up\n", netdev->name);
|
||||
|
||||
|
@ -892,19 +952,14 @@ static int ks_net_stop(struct net_device *netdev)
|
|||
|
||||
netif_stop_queue(netdev);
|
||||
|
||||
kfree(ks->frame_head_info);
|
||||
|
||||
mutex_lock(&ks->lock);
|
||||
|
||||
/* turn off the IRQs and ack any outstanding */
|
||||
ks_wrreg16(ks, KS_IER, 0x0000);
|
||||
ks_wrreg16(ks, KS_ISR, 0xffff);
|
||||
|
||||
/* shutdown RX process */
|
||||
ks_wrreg16(ks, KS_RXCR1, 0x0000);
|
||||
|
||||
/* shutdown TX process */
|
||||
ks_wrreg16(ks, KS_TXCR, 0x0000);
|
||||
/* shutdown RX/TX QMU */
|
||||
ks_disable_qmu(ks);
|
||||
|
||||
/* set powermode to soft power down to save power */
|
||||
ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
|
||||
|
@ -929,17 +984,8 @@ static int ks_net_stop(struct net_device *netdev)
|
|||
*/
|
||||
static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
|
||||
{
|
||||
unsigned fid = ks->fid;
|
||||
|
||||
fid = ks->fid;
|
||||
ks->fid = (ks->fid + 1) & TXFR_TXFID_MASK;
|
||||
|
||||
/* reduce the tx interrupt occurrances. */
|
||||
if (!fid)
|
||||
fid |= TXFR_TXIC; /* irq on completion */
|
||||
|
||||
/* start header at txb[0] to align txw entries */
|
||||
ks->txh.txw[0] = cpu_to_le16(fid);
|
||||
ks->txh.txw[0] = 0;
|
||||
ks->txh.txw[1] = cpu_to_le16(len);
|
||||
|
||||
/* 1. set sudo-DMA mode */
|
||||
|
@ -957,16 +1003,6 @@ static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
|
|||
;
|
||||
}
|
||||
|
||||
static void ks_disable_int(struct ks_net *ks)
|
||||
{
|
||||
ks_wrreg16(ks, KS_IER, 0x0000);
|
||||
} /* ks_disable_int */
|
||||
|
||||
static void ks_enable_int(struct ks_net *ks)
|
||||
{
|
||||
ks_wrreg16(ks, KS_IER, ks->rc_ier);
|
||||
} /* ks_enable_int */
|
||||
|
||||
/**
|
||||
* ks_start_xmit - transmit packet
|
||||
* @skb : The buffer to transmit
|
||||
|
@ -1410,25 +1446,6 @@ static int ks_read_selftest(struct ks_net *ks)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void ks_disable(struct ks_net *ks)
|
||||
{
|
||||
u16 w;
|
||||
|
||||
w = ks_rdreg16(ks, KS_TXCR);
|
||||
|
||||
/* Disables QMU Transmit (TXCR). */
|
||||
w &= ~TXCR_TXE;
|
||||
ks_wrreg16(ks, KS_TXCR, w);
|
||||
|
||||
/* Disables QMU Receive (RXCR1). */
|
||||
w = ks_rdreg16(ks, KS_RXCR1);
|
||||
w &= ~RXCR1_RXE ;
|
||||
ks_wrreg16(ks, KS_RXCR1, w);
|
||||
|
||||
ks->enabled = false;
|
||||
|
||||
} /* ks_disable */
|
||||
|
||||
static void ks_setup(struct ks_net *ks)
|
||||
{
|
||||
u16 w;
|
||||
|
@ -1463,7 +1480,7 @@ static void ks_setup(struct ks_net *ks)
|
|||
w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
|
||||
ks_wrreg16(ks, KS_TXCR, w);
|
||||
|
||||
w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE;
|
||||
w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE | RXCR1_RXME | RXCR1_RXIPFCC;
|
||||
|
||||
if (ks->promiscuous) /* bPromiscuous */
|
||||
w |= (RXCR1_RXAE | RXCR1_RXINVF);
|
||||
|
@ -1486,28 +1503,6 @@ static void ks_setup_int(struct ks_net *ks)
|
|||
ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
|
||||
} /* ks_setup_int */
|
||||
|
||||
void ks_enable(struct ks_net *ks)
|
||||
{
|
||||
u16 w;
|
||||
|
||||
w = ks_rdreg16(ks, KS_TXCR);
|
||||
/* Enables QMU Transmit (TXCR). */
|
||||
ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
|
||||
|
||||
/*
|
||||
* RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
|
||||
* Enable
|
||||
*/
|
||||
|
||||
w = ks_rdreg16(ks, KS_RXQCR);
|
||||
ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
|
||||
|
||||
/* Enables QMU Receive (RXCR1). */
|
||||
w = ks_rdreg16(ks, KS_RXCR1);
|
||||
ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
|
||||
ks->enabled = true;
|
||||
} /* ks_enable */
|
||||
|
||||
static int ks_hw_init(struct ks_net *ks)
|
||||
{
|
||||
#define MHEADER_SIZE (sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
|
||||
|
@ -1612,11 +1607,9 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
|
|||
|
||||
ks_soft_reset(ks, GRR_GSR);
|
||||
ks_hw_init(ks);
|
||||
ks_disable(ks);
|
||||
ks_disable_qmu(ks);
|
||||
ks_setup(ks);
|
||||
ks_setup_int(ks);
|
||||
ks_enable_int(ks);
|
||||
ks_enable(ks);
|
||||
memcpy(netdev->dev_addr, ks->mac_addr, 6);
|
||||
|
||||
data = ks_rdreg16(ks, KS_OBCR);
|
||||
|
@ -1658,6 +1651,7 @@ static int __devexit ks8851_remove(struct platform_device *pdev)
|
|||
struct ks_net *ks = netdev_priv(netdev);
|
||||
struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
|
||||
kfree(ks->frame_head_info);
|
||||
unregister_netdev(netdev);
|
||||
iounmap(ks->hw_addr);
|
||||
free_netdev(netdev);
|
||||
|
|
|
@@ -360,6 +360,7 @@ static int macvlan_init(struct net_device *dev)
	dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
				  (lowerdev->state & MACVLAN_STATE_MASK);
	dev->features = lowerdev->features & MACVLAN_FEATURES;
	dev->gso_max_size = lowerdev->gso_max_size;
	dev->iflink = lowerdev->ifindex;
	dev->hard_header_len = lowerdev->hard_header_len;

@@ -596,6 +597,7 @@ static int macvlan_device_event(struct notifier_block *unused,
	case NETDEV_FEAT_CHANGE:
		list_for_each_entry(vlan, &port->vlans, list) {
			vlan->dev->features = dev->features & MACVLAN_FEATURES;
			vlan->dev->gso_max_size = dev->gso_max_size;
			netdev_features_change(vlan->dev);
		}
		break;
@ -1163,6 +1163,8 @@ struct netxen_adapter {
|
|||
u32 int_vec_bit;
|
||||
u32 heartbit;
|
||||
|
||||
u8 mac_addr[ETH_ALEN];
|
||||
|
||||
struct netxen_adapter_stats stats;
|
||||
|
||||
struct netxen_recv_context recv_ctx;
|
||||
|
|
|
@ -545,6 +545,8 @@ enum {
|
|||
#define NETXEN_NIU_TEST_MUX_CTL (NETXEN_CRB_NIU + 0x00094)
|
||||
#define NETXEN_NIU_XG_PAUSE_CTL (NETXEN_CRB_NIU + 0x00098)
|
||||
#define NETXEN_NIU_XG_PAUSE_LEVEL (NETXEN_CRB_NIU + 0x000dc)
|
||||
#define NETXEN_NIU_FRAME_COUNT_SELECT (NETXEN_CRB_NIU + 0x000ac)
|
||||
#define NETXEN_NIU_FRAME_COUNT (NETXEN_CRB_NIU + 0x000b0)
|
||||
#define NETXEN_NIU_XG_SEL (NETXEN_CRB_NIU + 0x00128)
|
||||
#define NETXEN_NIU_GB_PAUSE_CTL (NETXEN_CRB_NIU + 0x0030c)
|
||||
|
||||
|
|
|
@ -383,24 +383,51 @@ int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
|
|||
|
||||
int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
|
||||
{
|
||||
__u32 reg;
|
||||
u32 mac_cfg;
|
||||
u32 cnt = 0;
|
||||
__u32 reg = 0x0200;
|
||||
u32 port = adapter->physical_port;
|
||||
u16 board_type = adapter->ahw.board_type;
|
||||
|
||||
if (port > NETXEN_NIU_MAX_XG_PORTS)
|
||||
return -EINVAL;
|
||||
|
||||
reg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port));
|
||||
if (mode == NETXEN_NIU_PROMISC_MODE)
|
||||
reg = (reg | 0x2000UL);
|
||||
else
|
||||
reg = (reg & ~0x2000UL);
|
||||
mac_cfg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port));
|
||||
mac_cfg &= ~0x4;
|
||||
NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg);
|
||||
|
||||
if (mode == NETXEN_NIU_ALLMULTI_MODE)
|
||||
reg = (reg | 0x1000UL);
|
||||
else
|
||||
reg = (reg & ~0x1000UL);
|
||||
if ((board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) ||
|
||||
(board_type == NETXEN_BRDTYPE_P2_SB31_10G_HMEZ))
|
||||
reg = (0x20 << port);
|
||||
|
||||
NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg);
|
||||
NXWR32(adapter, NETXEN_NIU_FRAME_COUNT_SELECT, reg);
|
||||
|
||||
mdelay(10);
|
||||
|
||||
while (NXRD32(adapter, NETXEN_NIU_FRAME_COUNT) && ++cnt < 20)
|
||||
mdelay(10);
|
||||
|
||||
if (cnt < 20) {
|
||||
|
||||
reg = NXRD32(adapter,
|
||||
NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port));
|
||||
|
||||
if (mode == NETXEN_NIU_PROMISC_MODE)
|
||||
reg = (reg | 0x2000UL);
|
||||
else
|
||||
reg = (reg & ~0x2000UL);
|
||||
|
||||
if (mode == NETXEN_NIU_ALLMULTI_MODE)
|
||||
reg = (reg | 0x1000UL);
|
||||
else
|
||||
reg = (reg & ~0x1000UL);
|
||||
|
||||
NXWR32(adapter,
|
||||
NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg);
|
||||
}
|
||||
|
||||
mac_cfg |= 0x4;
|
||||
NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -436,7 +463,7 @@ netxen_nic_enable_mcast_filter(struct netxen_adapter *adapter)
|
|||
{
|
||||
u32 val = 0;
|
||||
u16 port = adapter->physical_port;
|
||||
u8 *addr = adapter->netdev->dev_addr;
|
||||
u8 *addr = adapter->mac_addr;
|
||||
|
||||
if (adapter->mc_enabled)
|
||||
return 0;
|
||||
|
@ -465,7 +492,7 @@ netxen_nic_disable_mcast_filter(struct netxen_adapter *adapter)
|
|||
{
|
||||
u32 val = 0;
|
||||
u16 port = adapter->physical_port;
|
||||
u8 *addr = adapter->netdev->dev_addr;
|
||||
u8 *addr = adapter->mac_addr;
|
||||
|
||||
if (!adapter->mc_enabled)
|
||||
return 0;
|
||||
|
@ -660,7 +687,7 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
|
|||
|
||||
list_splice_tail_init(&adapter->mac_list, &del_list);
|
||||
|
||||
nx_p3_nic_add_mac(adapter, netdev->dev_addr, &del_list);
|
||||
nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list);
|
||||
nx_p3_nic_add_mac(adapter, bcast_addr, &del_list);
|
||||
|
||||
if (netdev->flags & IFF_PROMISC) {
|
||||
|
|
|
@ -544,6 +544,8 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
|
|||
continue;
|
||||
if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
|
||||
continue;
|
||||
if ((off & 0x0ff00000) == NETXEN_CRB_DDR_NET)
|
||||
continue;
|
||||
if (off == (NETXEN_CRB_PEG_NET_1 + 0x18))
|
||||
buf[i].data = 0x1020;
|
||||
/* skip the function enable register */
|
||||
|
|
|
@ -437,6 +437,7 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
|
|||
netdev->dev_addr[i] = *(p + 5 - i);
|
||||
|
||||
memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
|
||||
memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
|
||||
|
||||
/* set station address */
|
||||
|
||||
|
@ -459,6 +460,7 @@ int netxen_nic_set_mac(struct net_device *netdev, void *p)
|
|||
netxen_napi_disable(adapter);
|
||||
}
|
||||
|
||||
memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
|
||||
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
|
||||
adapter->macaddr_set(adapter, addr->sa_data);
|
||||
|
||||
|
@ -956,7 +958,7 @@ netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
|
|||
return err;
|
||||
}
|
||||
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
|
||||
adapter->macaddr_set(adapter, netdev->dev_addr);
|
||||
adapter->macaddr_set(adapter, adapter->mac_addr);
|
||||
|
||||
adapter->set_multi(netdev);
|
||||
adapter->set_mtu(adapter, netdev->mtu);
|
||||
|
|
|
@@ -3235,6 +3235,10 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
	flush_scheduled_work();

	unregister_netdev(dev);

	/* restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl_disable_msi(pdev, tp);
	rtl8169_release_board(pdev, dev, tp->mmio_addr);
	pci_set_drvdata(pdev, NULL);

@@ -4881,6 +4885,9 @@ static void rtl_shutdown(struct pci_dev *pdev)

	rtl8169_net_suspend(dev);

	/* restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	spin_lock_irq(&tp->lock);

	rtl8169_asic_down(ioaddr);
@@ -2283,7 +2283,7 @@ static int __devinit smc_drv_probe(struct platform_device *pdev)

	ndev->irq = ires->start;

	if (ires->flags & IRQF_TRIGGER_MASK)
	if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
		irq_flags = ires->flags & IRQF_TRIGGER_MASK;

	ret = smc_request_attrib(pdev, ndev);
@@ -252,6 +252,9 @@ static int smsc9420_ethtool_get_settings(struct net_device *dev,
{
	struct smsc9420_pdata *pd = netdev_priv(dev);

	if (!pd->phy_dev)
		return -ENODEV;

	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;
	return phy_ethtool_gset(pd->phy_dev, cmd);

@@ -262,6 +265,9 @@ static int smsc9420_ethtool_set_settings(struct net_device *dev,
{
	struct smsc9420_pdata *pd = netdev_priv(dev);

	if (!pd->phy_dev)
		return -ENODEV;

	return phy_ethtool_sset(pd->phy_dev, cmd);
}

@@ -290,6 +296,10 @@ static void smsc9420_ethtool_set_msglevel(struct net_device *netdev, u32 data)
static int smsc9420_ethtool_nway_reset(struct net_device *netdev)
{
	struct smsc9420_pdata *pd = netdev_priv(netdev);

	if (!pd->phy_dev)
		return -ENODEV;

	return phy_start_aneg(pd->phy_dev);
}

@@ -312,6 +322,10 @@ smsc9420_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs,
	for (i = 0; i < 0x100; i += (sizeof(u32)))
		data[j++] = smsc9420_reg_read(pd, i);

	// cannot read phy registers if the net device is down
	if (!phy_dev)
		return;

	for (i = 0; i <= 31; i++)
		data[j++] = smsc9420_mii_read(phy_dev->bus, phy_dev->addr, i);
}
@ -416,13 +416,8 @@ static void init_dma_desc_rings(struct net_device *dev)
|
|||
unsigned int txsize = priv->dma_tx_size;
|
||||
unsigned int rxsize = priv->dma_rx_size;
|
||||
unsigned int bfsize = priv->dma_buf_sz;
|
||||
int buff2_needed = 0;
|
||||
int dis_ic = 0;
|
||||
int buff2_needed = 0, dis_ic = 0;
|
||||
|
||||
#ifdef CONFIG_STMMAC_TIMER
|
||||
/* Using Timers disable interrupts on completion for the reception */
|
||||
dis_ic = 1;
|
||||
#endif
|
||||
/* Set the Buffer size according to the MTU;
|
||||
* indeed, in case of jumbo we need to bump-up the buffer sizes.
|
||||
*/
|
||||
|
@ -437,6 +432,11 @@ static void init_dma_desc_rings(struct net_device *dev)
|
|||
else
|
||||
bfsize = DMA_BUFFER_SIZE;
|
||||
|
||||
#ifdef CONFIG_STMMAC_TIMER
|
||||
/* Disable interrupts on completion for the reception if timer is on */
|
||||
if (likely(priv->tm->enable))
|
||||
dis_ic = 1;
|
||||
#endif
|
||||
/* If the MTU exceeds 8k so use the second buffer in the chain */
|
||||
if (bfsize >= BUF_SIZE_8KiB)
|
||||
buff2_needed = 1;
|
||||
|
@ -809,20 +809,22 @@ static void stmmac_tx(struct stmmac_priv *priv)
|
|||
|
||||
static inline void stmmac_enable_irq(struct stmmac_priv *priv)
|
||||
{
|
||||
#ifndef CONFIG_STMMAC_TIMER
|
||||
writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA);
|
||||
#else
|
||||
priv->tm->timer_start(tmrate);
|
||||
#ifdef CONFIG_STMMAC_TIMER
|
||||
if (likely(priv->tm->enable))
|
||||
priv->tm->timer_start(tmrate);
|
||||
else
|
||||
#endif
|
||||
writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA);
|
||||
}
|
||||
|
||||
static inline void stmmac_disable_irq(struct stmmac_priv *priv)
|
||||
{
|
||||
#ifndef CONFIG_STMMAC_TIMER
|
||||
writel(0, priv->dev->base_addr + DMA_INTR_ENA);
|
||||
#else
|
||||
priv->tm->timer_stop();
|
||||
#ifdef CONFIG_STMMAC_TIMER
|
||||
if (likely(priv->tm->enable))
|
||||
priv->tm->timer_stop();
|
||||
else
|
||||
#endif
|
||||
writel(0, priv->dev->base_addr + DMA_INTR_ENA);
|
||||
}
|
||||
|
||||
static int stmmac_has_work(struct stmmac_priv *priv)
|
||||
|
@ -1031,22 +1033,23 @@ static int stmmac_open(struct net_device *dev)
|
|||
}
|
||||
|
||||
#ifdef CONFIG_STMMAC_TIMER
|
||||
priv->tm = kmalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
|
||||
priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
|
||||
if (unlikely(priv->tm == NULL)) {
|
||||
pr_err("%s: ERROR: timer memory alloc failed \n", __func__);
|
||||
return -ENOMEM;
|
||||
}
|
||||
priv->tm->freq = tmrate;
|
||||
|
||||
/* Test if the HW timer can be actually used.
|
||||
* In case of failure continue with no timer. */
|
||||
/* Test if the external timer can be actually used.
|
||||
* In case of failure continue without timer. */
|
||||
if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) {
|
||||
pr_warning("stmmaceth: cannot attach the HW timer\n");
|
||||
pr_warning("stmmaceth: cannot attach the external timer.\n");
|
||||
tmrate = 0;
|
||||
priv->tm->freq = 0;
|
||||
priv->tm->timer_start = stmmac_no_timer_started;
|
||||
priv->tm->timer_stop = stmmac_no_timer_stopped;
|
||||
}
|
||||
} else
|
||||
priv->tm->enable = 1;
|
||||
#endif
|
||||
|
||||
/* Create and initialize the TX/RX descriptors chains. */
|
||||
|
@ -1322,9 +1325,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
|
||||
/* Interrupt on completition only for the latest segment */
|
||||
priv->mac_type->ops->close_tx_desc(desc);
|
||||
|
||||
#ifdef CONFIG_STMMAC_TIMER
|
||||
/* Clean IC while using timers */
|
||||
priv->mac_type->ops->clear_tx_ic(desc);
|
||||
/* Clean IC while using timer */
|
||||
if (likely(priv->tm->enable))
|
||||
priv->mac_type->ops->clear_tx_ic(desc);
|
||||
#endif
|
||||
/* To avoid raise condition */
|
||||
priv->mac_type->ops->set_tx_owner(first);
|
||||
|
@ -2028,7 +2033,8 @@ static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
|
|||
|
||||
#ifdef CONFIG_STMMAC_TIMER
|
||||
priv->tm->timer_stop();
|
||||
dis_ic = 1;
|
||||
if (likely(priv->tm->enable))
|
||||
dis_ic = 1;
|
||||
#endif
|
||||
napi_disable(&priv->napi);
|
||||
|
||||
|
|
|
@ -63,7 +63,7 @@ int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
|
|||
|
||||
stmmac_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
|
||||
if (stmmac_rtc == NULL) {
|
||||
pr_error("open rtc device failed\n");
|
||||
pr_err("open rtc device failed\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
|
@ -71,7 +71,7 @@ int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
|
|||
|
||||
/* Periodic mode is not supported */
|
||||
if ((rtc_irq_set_freq(stmmac_rtc, &stmmac_task, tm->freq) < 0)) {
|
||||
pr_error("set periodic failed\n");
|
||||
pr_err("set periodic failed\n");
|
||||
rtc_irq_unregister(stmmac_rtc, &stmmac_task);
|
||||
rtc_class_close(stmmac_rtc);
|
||||
return -1;
|
||||
|
|
|
@ -26,6 +26,7 @@ struct stmmac_timer {
|
|||
void (*timer_start) (unsigned int new_freq);
|
||||
void (*timer_stop) (void);
|
||||
unsigned int freq;
|
||||
unsigned int enable;
|
||||
};
|
||||
|
||||
/* Open the HW timer device and return 0 in case of success */
|
||||
|
|
|
@@ -378,7 +378,7 @@ static void dbg_dump(int line_count, const char *func_name, unsigned char *buf,
}

#define DUMP(buf_, len_) \
	dbg_dump(__LINE__, __func__, buf_, len_)
	dbg_dump(__LINE__, __func__, (unsigned char *)buf_, len_)

#define DUMP1(buf_, len_) \
	do { \

@@ -1363,7 +1363,7 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
	/* reset the rts and dtr */
	/* do the actual close */
	serial->open_count--;
	kref_put(&serial->parent->ref, hso_serial_ref_free);

	if (serial->open_count <= 0) {
		serial->open_count = 0;
		spin_lock_irq(&serial->serial_lock);

@@ -1383,6 +1383,8 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
	usb_autopm_put_interface(serial->parent->interface);

	mutex_unlock(&serial->parent->mutex);

	kref_put(&serial->parent->ref, hso_serial_ref_free);
}

/* close the requested serial port */

@@ -1527,7 +1529,7 @@ static void tiocmget_intr_callback(struct urb *urb)
		dev_warn(&usb->dev,
			"hso received invalid serial state notification\n");
		DUMP(serial_state_notification,
			sizeof(hso_serial_state_notifation))
			sizeof(struct hso_serial_state_notification));
	} else {

		UART_state_bitmap = le16_to_cpu(serial_state_notification->
@@ -210,32 +210,29 @@ rx_drop:
static struct net_device_stats *veth_get_stats(struct net_device *dev)
{
	struct veth_priv *priv;
	struct net_device_stats *dev_stats;
	int cpu;
	struct veth_net_stats *stats;
	struct veth_net_stats *stats, total = {0};

	priv = netdev_priv(dev);
	dev_stats = &dev->stats;

	dev_stats->rx_packets = 0;
	dev_stats->tx_packets = 0;
	dev_stats->rx_bytes = 0;
	dev_stats->tx_bytes = 0;
	dev_stats->tx_dropped = 0;
	dev_stats->rx_dropped = 0;

	for_each_online_cpu(cpu) {
	for_each_possible_cpu(cpu) {
		stats = per_cpu_ptr(priv->stats, cpu);

		dev_stats->rx_packets += stats->rx_packets;
		dev_stats->tx_packets += stats->tx_packets;
		dev_stats->rx_bytes += stats->rx_bytes;
		dev_stats->tx_bytes += stats->tx_bytes;
		dev_stats->tx_dropped += stats->tx_dropped;
		dev_stats->rx_dropped += stats->rx_dropped;
		total.rx_packets += stats->rx_packets;
		total.tx_packets += stats->tx_packets;
		total.rx_bytes += stats->rx_bytes;
		total.tx_bytes += stats->tx_bytes;
		total.tx_dropped += stats->tx_dropped;
		total.rx_dropped += stats->rx_dropped;
	}
	dev->stats.rx_packets = total.rx_packets;
	dev->stats.tx_packets = total.tx_packets;
	dev->stats.rx_bytes = total.rx_bytes;
	dev->stats.tx_bytes = total.tx_bytes;
	dev->stats.tx_dropped = total.tx_dropped;
	dev->stats.rx_dropped = total.rx_dropped;

	return dev_stats;
	return &dev->stats;
}

static int veth_open(struct net_device *dev)
@@ -907,6 +907,7 @@ static ssize_t cosa_write(struct file *file,
			current->state = TASK_RUNNING;
			chan->tx_status = 1;
			spin_unlock_irqrestore(&cosa->lock, flags);
			up(&chan->wsem);
			return -ERESTARTSYS;
		}
	}
@@ -1555,6 +1555,8 @@ void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	hw->wiphy->ps_default = false;

	hw->queues = 4;
	hw->max_rates = 4;
	hw->channel_change_time = 5000;
@@ -893,7 +893,6 @@ struct sctp_transport {
	 */
	/* RTO : The current retransmission timeout value. */
	unsigned long rto;
	unsigned long last_rto;

	__u32 rtt; /* This is the most recent RTT. */
@ -363,6 +363,7 @@ struct pktgen_dev {
|
|||
* device name (not when the inject is
|
||||
* started as it used to do.)
|
||||
*/
|
||||
char odevname[32];
|
||||
struct flow_state *flows;
|
||||
unsigned cflows; /* Concurrent flows (config) */
|
||||
unsigned lflow; /* Flow length (config) */
|
||||
|
@ -426,7 +427,7 @@ static const char version[] =
|
|||
static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i);
|
||||
static int pktgen_add_device(struct pktgen_thread *t, const char *ifname);
|
||||
static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
|
||||
const char *ifname);
|
||||
const char *ifname, bool exact);
|
||||
static int pktgen_device_event(struct notifier_block *, unsigned long, void *);
|
||||
static void pktgen_run_all_threads(void);
|
||||
static void pktgen_reset_all_threads(void);
|
||||
|
@ -528,7 +529,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
|
|||
seq_printf(seq,
|
||||
" frags: %d delay: %llu clone_skb: %d ifname: %s\n",
|
||||
pkt_dev->nfrags, (unsigned long long) pkt_dev->delay,
|
||||
pkt_dev->clone_skb, pkt_dev->odev->name);
|
||||
pkt_dev->clone_skb, pkt_dev->odevname);
|
||||
|
||||
seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows,
|
||||
pkt_dev->lflow);
|
||||
|
@ -1688,13 +1689,13 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
|
|||
if_lock(t);
|
||||
list_for_each_entry(pkt_dev, &t->if_list, list)
|
||||
if (pkt_dev->running)
|
||||
seq_printf(seq, "%s ", pkt_dev->odev->name);
|
||||
seq_printf(seq, "%s ", pkt_dev->odevname);
|
||||
|
||||
seq_printf(seq, "\nStopped: ");
|
||||
|
||||
list_for_each_entry(pkt_dev, &t->if_list, list)
|
||||
if (!pkt_dev->running)
|
||||
seq_printf(seq, "%s ", pkt_dev->odev->name);
|
||||
seq_printf(seq, "%s ", pkt_dev->odevname);
|
||||
|
||||
if (t->result[0])
|
||||
seq_printf(seq, "\nResult: %s\n", t->result);
|
||||
|
@ -1817,9 +1818,10 @@ static struct pktgen_dev *__pktgen_NN_threads(const char *ifname, int remove)
|
|||
{
|
||||
struct pktgen_thread *t;
|
||||
struct pktgen_dev *pkt_dev = NULL;
|
||||
bool exact = (remove == FIND);
|
||||
|
||||
list_for_each_entry(t, &pktgen_threads, th_list) {
|
||||
pkt_dev = pktgen_find_dev(t, ifname);
|
||||
pkt_dev = pktgen_find_dev(t, ifname, exact);
|
||||
if (pkt_dev) {
|
||||
if (remove) {
|
||||
if_lock(t);
|
||||
|
@ -1994,7 +1996,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
|
|||
"queue_map_min (zero-based) (%d) exceeds valid range "
|
||||
"[0 - %d] for (%d) queues on %s, resetting\n",
|
||||
pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
|
||||
pkt_dev->odev->name);
|
||||
pkt_dev->odevname);
|
||||
pkt_dev->queue_map_min = ntxq - 1;
|
||||
}
|
||||
if (pkt_dev->queue_map_max >= ntxq) {
|
||||
|
@ -2002,7 +2004,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
|
|||
"queue_map_max (zero-based) (%d) exceeds valid range "
|
||||
"[0 - %d] for (%d) queues on %s, resetting\n",
|
||||
pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
|
||||
pkt_dev->odev->name);
|
||||
pkt_dev->odevname);
|
||||
pkt_dev->queue_map_max = ntxq - 1;
|
||||
}
|
||||
|
||||
|
@ -3262,7 +3264,7 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
|
|||
|
||||
if (!pkt_dev->running) {
|
||||
printk(KERN_WARNING "pktgen: interface: %s is already "
|
||||
"stopped\n", pkt_dev->odev->name);
|
||||
"stopped\n", pkt_dev->odevname);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -3464,7 +3466,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
|
|||
default: /* Drivers are not supposed to return other values! */
|
||||
if (net_ratelimit())
|
||||
pr_info("pktgen: %s xmit error: %d\n",
|
||||
odev->name, ret);
|
||||
pkt_dev->odevname, ret);
|
||||
pkt_dev->errors++;
|
||||
/* fallthru */
|
||||
case NETDEV_TX_LOCKED:
|
||||
|
@ -3566,13 +3568,18 @@ static int pktgen_thread_worker(void *arg)
|
|||
}
|
||||
|
||||
static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
|
||||
const char *ifname)
|
||||
const char *ifname, bool exact)
|
||||
{
|
||||
struct pktgen_dev *p, *pkt_dev = NULL;
|
||||
if_lock(t);
|
||||
size_t len = strlen(ifname);
|
||||
|
||||
if_lock(t);
|
||||
list_for_each_entry(p, &t->if_list, list)
|
||||
if (strncmp(p->odev->name, ifname, IFNAMSIZ) == 0) {
|
||||
if (strncmp(p->odevname, ifname, len) == 0) {
|
||||
if (p->odevname[len]) {
|
||||
if (exact || p->odevname[len] != '@')
|
||||
continue;
|
||||
}
|
||||
pkt_dev = p;
|
||||
break;
|
||||
}
|
||||
|
@ -3628,6 +3635,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
|
|||
if (!pkt_dev)
|
||||
return -ENOMEM;
|
||||
|
||||
strcpy(pkt_dev->odevname, ifname);
|
||||
pkt_dev->flows = vmalloc(MAX_CFLOWS * sizeof(struct flow_state));
|
||||
if (pkt_dev->flows == NULL) {
|
||||
kfree(pkt_dev);
|
||||
|
|
|
@@ -563,7 +563,7 @@ out_oversize:
		printk(KERN_INFO "Oversized IP packet from %pI4.\n",
			&qp->saddr);
out_fail:
	IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_REASMFAILS);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	return err;
}
@ -173,12 +173,14 @@ static void sta_addba_resp_timer_expired(unsigned long data)
|
|||
|
||||
/* check if the TID waits for addBA response */
|
||||
spin_lock_bh(&sta->lock);
|
||||
if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
|
||||
if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK)) !=
|
||||
HT_ADDBA_REQUESTED_MSK) {
|
||||
spin_unlock_bh(&sta->lock);
|
||||
*state = HT_AGG_STATE_IDLE;
|
||||
#ifdef CONFIG_MAC80211_HT_DEBUG
|
||||
printk(KERN_DEBUG "timer expired on tid %d but we are not "
|
||||
"expecting addBA response there", tid);
|
||||
"(or no longer) expecting addBA response there",
|
||||
tid);
|
||||
#endif
|
||||
return;
|
||||
}
|
||||
|
@ -666,21 +668,21 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
|
|||
|
||||
state = &sta->ampdu_mlme.tid_state_tx[tid];
|
||||
|
||||
del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
|
||||
|
||||
spin_lock_bh(&sta->lock);
|
||||
|
||||
if (!(*state & HT_ADDBA_REQUESTED_MSK))
|
||||
goto timer_still_needed;
|
||||
goto out;
|
||||
|
||||
if (mgmt->u.action.u.addba_resp.dialog_token !=
|
||||
sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
|
||||
#ifdef CONFIG_MAC80211_HT_DEBUG
|
||||
printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
|
||||
#endif /* CONFIG_MAC80211_HT_DEBUG */
|
||||
goto timer_still_needed;
|
||||
goto out;
|
||||
}
|
||||
|
||||
del_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
|
||||
|
||||
#ifdef CONFIG_MAC80211_HT_DEBUG
|
||||
printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid);
|
||||
#endif /* CONFIG_MAC80211_HT_DEBUG */
|
||||
|
@ -699,10 +701,6 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
|
|||
___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR);
|
||||
}
|
||||
|
||||
goto out;
|
||||
|
||||
timer_still_needed:
|
||||
add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
|
||||
out:
|
||||
spin_unlock_bh(&sta->lock);
|
||||
}
|
||||
|
|
|
@ -661,6 +661,14 @@ struct ieee80211_local {
|
|||
*/
|
||||
bool suspended;
|
||||
|
||||
/*
|
||||
* Resuming is true while suspended, but when we're reprogramming the
|
||||
* hardware -- at that time it's allowed to use ieee80211_queue_work()
|
||||
* again even though some other parts of the stack are still suspended
|
||||
* and we still drop received frames to avoid waking the stack.
|
||||
*/
|
||||
bool resuming;
|
||||
|
||||
/*
|
||||
* quiescing is true during the suspend process _only_ to
|
||||
* ease timer cancelling etc.
|
||||
|
|
|
@ -520,9 +520,9 @@ EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
|
|||
*/
|
||||
static bool ieee80211_can_queue_work(struct ieee80211_local *local)
|
||||
{
|
||||
if (WARN(local->suspended, "queueing ieee80211 work while "
|
||||
"going to suspend\n"))
|
||||
return false;
|
||||
if (WARN(local->suspended && !local->resuming,
|
||||
"queueing ieee80211 work while going to suspend\n"))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
@ -1025,13 +1025,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
|
|||
struct sta_info *sta;
|
||||
unsigned long flags;
|
||||
int res;
|
||||
bool from_suspend = local->suspended;
|
||||
|
||||
/*
|
||||
* We're going to start the hardware, at that point
|
||||
* we are no longer suspended and can RX frames.
|
||||
*/
|
||||
local->suspended = false;
|
||||
if (local->suspended)
|
||||
local->resuming = true;
|
||||
|
||||
/* restart hardware */
|
||||
if (local->open_count) {
|
||||
|
@ -1129,11 +1125,14 @@ int ieee80211_reconfig(struct ieee80211_local *local)
|
|||
* If this is for hw restart things are still running.
|
||||
* We may want to change that later, however.
|
||||
*/
|
||||
if (!from_suspend)
|
||||
if (!local->suspended)
|
||||
return 0;
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
/* first set suspended false, then resuming */
|
||||
local->suspended = false;
|
||||
mb();
|
||||
local->resuming = false;
|
||||
|
||||
list_for_each_entry(sdata, &local->interfaces, list) {
|
||||
switch(sdata->vif.type) {
|
||||
|
|
|
@ -128,9 +128,8 @@ EXPORT_SYMBOL(nf_log_packet);
|
|||
|
||||
#ifdef CONFIG_PROC_FS
|
||||
static void *seq_start(struct seq_file *seq, loff_t *pos)
|
||||
__acquires(RCU)
|
||||
{
|
||||
rcu_read_lock();
|
||||
mutex_lock(&nf_log_mutex);
|
||||
|
||||
if (*pos >= ARRAY_SIZE(nf_loggers))
|
||||
return NULL;
|
||||
|
@ -149,9 +148,8 @@ static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
|
|||
}
|
||||
|
||||
static void seq_stop(struct seq_file *s, void *v)
|
||||
__releases(RCU)
|
||||
{
|
||||
rcu_read_unlock();
|
||||
mutex_unlock(&nf_log_mutex);
|
||||
}
|
||||
|
||||
static int seq_show(struct seq_file *s, void *v)
|
||||
|
@ -161,7 +159,7 @@ static int seq_show(struct seq_file *s, void *v)
|
|||
struct nf_logger *t;
|
||||
int ret;
|
||||
|
||||
logger = rcu_dereference(nf_loggers[*pos]);
|
||||
logger = nf_loggers[*pos];
|
||||
|
||||
if (!logger)
|
||||
ret = seq_printf(s, "%2lld NONE (", *pos);
|
||||
|
@ -171,22 +169,16 @@ static int seq_show(struct seq_file *s, void *v)
|
|||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
mutex_lock(&nf_log_mutex);
|
||||
list_for_each_entry(t, &nf_loggers_l[*pos], list[*pos]) {
|
||||
ret = seq_printf(s, "%s", t->name);
|
||||
if (ret < 0) {
|
||||
mutex_unlock(&nf_log_mutex);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
if (&t->list[*pos] != nf_loggers_l[*pos].prev) {
|
||||
ret = seq_printf(s, ",");
|
||||
if (ret < 0) {
|
||||
mutex_unlock(&nf_log_mutex);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
}
|
||||
mutex_unlock(&nf_log_mutex);
|
||||
|
||||
return seq_printf(s, ")\n");
|
||||
}
|
||||
|
|
|
@@ -112,7 +112,7 @@ static bool limit_mt_check(const struct xt_mtchk_param *par)

	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
	if (priv == NULL)
		return -ENOMEM;
		return false;

	/* For SMP, we only want to use one set of state. */
	r->master = priv;
@@ -118,7 +118,7 @@ static int xt_osf_remove_callback(struct sock *ctnl, struct sk_buff *skb,
{
	struct xt_osf_user_finger *f;
	struct xt_osf_finger *sf;
	int err = ENOENT;
	int err = -ENOENT;

	if (!osf_attrs[OSF_ATTR_FINGER])
		return -EINVAL;
@@ -1189,6 +1189,7 @@ static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
#endif

static const struct file_operations rfkill_fops = {
	.owner = THIS_MODULE,
	.open = rfkill_fop_open,
	.read = rfkill_fop_read,
	.write = rfkill_fop_write,
@@ -423,16 +423,6 @@ void sctp_retransmit_mark(struct sctp_outq *q,
		if ((reason == SCTP_RTXR_FAST_RTX &&
			(chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
			(reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
			/* If this chunk was sent less then 1 rto ago, do not
			 * retransmit this chunk, but give the peer time
			 * to acknowlege it. Do this only when
			 * retransmitting due to T3 timeout.
			 */
			if (reason == SCTP_RTXR_T3_RTX &&
			    time_before(jiffies, chunk->sent_at +
					transport->last_rto))
				continue;

			/* RFC 2960 6.2.1 Processing a Received SACK
			 *
			 * C) Any time a DATA chunk is marked for
@@ -480,7 +480,6 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
	 * that indicates that we have an outstanding HB.
	 */
	if (!is_hb || transport->hb_sent) {
		transport->last_rto = transport->rto;
		transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
	}
}
@@ -74,7 +74,7 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
	 * given destination transport address, set RTO to the protocol
	 * parameter 'RTO.Initial'.
	 */
	peer->last_rto = peer->rto = msecs_to_jiffies(sctp_rto_initial);
	peer->rto = msecs_to_jiffies(sctp_rto_initial);
	peer->rtt = 0;
	peer->rttvar = 0;
	peer->srtt = 0;

@@ -386,7 +386,6 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
		tp->rto = tp->asoc->rto_max;

	tp->rtt = rtt;
	tp->last_rto = tp->rto;

	/* Reset rto_pending so that a new RTT measurement is started when a
	 * new data chunk is sent.

@@ -602,7 +601,7 @@ void sctp_transport_reset(struct sctp_transport *t)
	 */
	t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
	t->ssthresh = asoc->peer.i.a_rwnd;
	t->last_rto = t->rto = asoc->rto_initial;
	t->rto = asoc->rto_initial;
	t->rtt = 0;
	t->srtt = 0;
	t->rttvar = 0;