Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (34 commits)
  b43: Fix warning at drivers/mmc/core/core.c:237 in mmc_wait_for_cmd
  mac80211: fix failure to check kmalloc return value in key_key_read
  libertas: Fix sd8686 firmware reload
  ath9k: Fix incorrect access of rate flags in RC
  netfilter: xt_socket: Make tproto signed in socket_mt6_v1().
  stmmac: enable/disable rx/tx in the core with a single write.
  net: atarilance - flags should be unsigned long
  netxen: fix kdump
  pktgen: Limit how much data we copy onto the stack.
  net: Limit socket I/O iovec total length to INT_MAX.
  USB: gadget: fix ethernet gadget crash in gether_setup
  fib: Fix fib zone and its hash leak on namespace stop
  cxgb3: Fix panic in free_tx_desc()
  cxgb3: fix crash due to manipulating queues before registration
  8390: Don't oops on starting dev queue
  dccp ccid-2: Stop polling
  dccp: Refine the wait-for-ccid mechanism
  dccp: Extend CCID packet dequeueing interface
  dccp: Return-value convention of hc_tx_send_packet()
  igbvf: fix panic on load
  ...
Linus Torvalds, 2010-10-29 14:17:12 -07:00
commit 1840897ab5
49 changed files with 527 additions and 248 deletions

@@ -2541,6 +2541,7 @@ source "drivers/net/stmmac/Kconfig"
 config PCH_GBE
 	tristate "PCH Gigabit Ethernet"
 	depends on PCI
+	select MII
 	---help---
 	  This is a gigabit ethernet driver for Topcliff PCH.
 	  Topcliff PCH is the platform controller hub that is used in Intel's

@@ -407,7 +407,7 @@ static noinline int __init addr_accessible(volatile void *regp, int wordflag,
 				    int writeflag)
 {
 	int	ret;
-	long	flags;
+	unsigned long	flags;
 	long	*vbr, save_berr;
 
 	local_irq_save(flags);

@@ -3301,7 +3301,6 @@ static int __devinit init_one(struct pci_dev *pdev,
 		pi->rx_offload = T3_RX_CSUM | T3_LRO;
 		pi->port_id = i;
 		netif_carrier_off(netdev);
-		netif_tx_stop_all_queues(netdev);
 		netdev->irq = pdev->irq;
 		netdev->mem_start = mmio_start;
 		netdev->mem_end = mmio_start + mmio_len - 1;
@@ -3342,6 +3341,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 			adapter->name = adapter->port[i]->name;
 
 			__set_bit(i, &adapter->registered_device_map);
+			netif_tx_stop_all_queues(adapter->port[i]);
 		}
 	}
 	if (!adapter->registered_device_map) {

@@ -296,8 +296,10 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
 		if (d->skb) {	/* an SGL is present */
 			if (need_unmap)
 				unmap_skb(d->skb, q, cidx, pdev);
-			if (d->eop)
+			if (d->eop) {
 				kfree_skb(d->skb);
+				d->skb = NULL;
+			}
 		}
 		++d;
 		if (++cidx == q->size) {

@@ -52,6 +52,10 @@
 	(ID_LED_DEF1_DEF2))
 
 #define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+#define E1000_BASE1000T_STATUS          10
+#define E1000_IDLE_ERROR_COUNT_MASK     0xFF
+#define E1000_RECEIVE_ERROR_COUNTER     21
+#define E1000_RECEIVE_ERROR_MAX         0xFFFF
 
 #define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
@@ -1242,6 +1246,39 @@ static s32 e1000_led_on_82574(struct e1000_hw *hw)
 	return 0;
 }
 
+/**
+ *  e1000_check_phy_82574 - check 82574 phy hung state
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns whether phy is hung or not
+ **/
+bool e1000_check_phy_82574(struct e1000_hw *hw)
+{
+	u16 status_1kbt = 0;
+	u16 receive_errors = 0;
+	bool phy_hung = false;
+	s32 ret_val = 0;
+
+	/*
+	 * Read the PHY Receive Error counter first; if it is at its max
+	 * (all F's), read the Base1000T status register. If both are at
+	 * max, the PHY is hung.
+	 */
+	ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors);
+	if (ret_val)
+		goto out;
+	if (receive_errors == E1000_RECEIVE_ERROR_MAX) {
+		ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt);
+		if (ret_val)
+			goto out;
+		if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) ==
+		    E1000_IDLE_ERROR_COUNT_MASK)
+			phy_hung = true;
+	}
+out:
+	return phy_hung;
+}
+
 /**
  *  e1000_setup_link_82571 - Setup flow control and link settings
  *  @hw: pointer to the HW structure
@@ -1859,6 +1896,7 @@ struct e1000_info e1000_82574_info = {
 				  | FLAG_HAS_SMART_POWER_DOWN
 				  | FLAG_HAS_AMT
 				  | FLAG_HAS_CTRLEXT_ON_LOAD,
+	.flags2			= FLAG2_CHECK_PHY_HANG,
 	.pba			= 36,
 	.max_hw_frame_size	= DEFAULT_JUMBO,
 	.get_variants		= e1000_get_variants_82571,

@@ -397,6 +397,7 @@ struct e1000_adapter {
 	struct work_struct print_hang_task;
 
 	bool idle_check;
+	int phy_hang_count;
 };
 
 struct e1000_info {
@@ -454,6 +455,7 @@ struct e1000_info {
 #define FLAG2_HAS_EEE                     (1 << 5)
 #define FLAG2_DMA_BURST                   (1 << 6)
 #define FLAG2_DISABLE_AIM                 (1 << 8)
+#define FLAG2_CHECK_PHY_HANG              (1 << 9)
 
 #define E1000_RX_DESC_PS(R, i)	    \
 	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -631,6 +633,7 @@ extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
 extern s32 e1000_check_polarity_ife(struct e1000_hw *hw);
 extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
 extern s32 e1000_check_polarity_igp(struct e1000_hw *hw);
+extern bool e1000_check_phy_82574(struct e1000_hw *hw);
 
 static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
 {

@@ -4098,6 +4098,25 @@ static void e1000e_enable_receives(struct e1000_adapter *adapter)
 	}
 }
 
+static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	/*
+	 * With 82574 controllers, the PHY needs to be checked periodically
+	 * for a hung state; reset it if two consecutive calls return true.
+	 */
+	if (e1000_check_phy_82574(hw))
+		adapter->phy_hang_count++;
+	else
+		adapter->phy_hang_count = 0;
+
+	if (adapter->phy_hang_count > 1) {
+		adapter->phy_hang_count = 0;
+		schedule_work(&adapter->reset_task);
+	}
+}
+
 /**
  * e1000_watchdog - Timer Call-back
  * @data: pointer to adapter cast into an unsigned long
@@ -4333,6 +4352,9 @@ link_up:
 	if (e1000e_get_laa_state_82571(hw))
 		e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
 
+	if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
+		e1000e_check_82574_phy_workaround(adapter);
+
 	/* Reset the timer */
 	if (!test_bit(__E1000_DOWN, &adapter->state))
 		mod_timer(&adapter->watchdog_timer,
@@ -4860,8 +4882,11 @@ static void e1000_reset_task(struct work_struct *work)
 	struct e1000_adapter *adapter;
 	adapter = container_of(work, struct e1000_adapter, reset_task);
 
-	e1000e_dump(adapter);
-	e_err("Reset adapter\n");
+	if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
+	      (adapter->flags & FLAG_RX_RESTART_NOW))) {
+		e1000e_dump(adapter);
+		e_err("Reset adapter\n");
+	}
 	e1000e_reinit_locked(adapter);
 }

@@ -4107,7 +4107,6 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 				    struct igb_ring *tx_ring)
 {
-	struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
 	int tso = 0, count;
 	u32 tx_flags = 0;
 	u16 first;

@@ -2783,15 +2783,15 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
 	/* reset the hardware with the new settings */
 	igbvf_reset(adapter);
 
-	/* tell the stack to leave us alone until igbvf_open() is called */
-	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
-
 	strcpy(netdev->name, "eth%d");
 	err = register_netdev(netdev);
 	if (err)
 		goto err_hw_init;
 
+	/* tell the stack to leave us alone until igbvf_open() is called */
+	netif_carrier_off(netdev);
+	netif_stop_queue(netdev);
+
 	igbvf_print_device_info(adapter);
 	igbvf_initialize_last_counter_stats(adapter);

@@ -533,6 +533,7 @@ ixgb_remove(struct pci_dev *pdev)
 	pci_release_regions(pdev);
 
 	free_netdev(netdev);
+	pci_disable_device(pdev);
 }
 
 /**

@@ -43,9 +43,12 @@
  *   ixgbe_dcb_check_config().
  */
 s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
-				   u8 direction)
+				   int max_frame, u8 direction)
 {
 	struct tc_bw_alloc *p;
+	int min_credit;
+	int min_multiplier;
+	int min_percent = 100;
 	s32 ret_val = 0;
 	/* Initialization values default for Tx settings */
 	u32 credit_refill       = 0;
@@ -59,6 +62,31 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
 		goto out;
 	}
 
+	min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
+			DCB_CREDIT_QUANTUM;
+
+	/* Find smallest link percentage */
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		p = &dcb_config->tc_config[i].path[direction];
+		bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
+		link_percentage = p->bwg_percent;
+
+		link_percentage = (link_percentage * bw_percent) / 100;
+
+		if (link_percentage && link_percentage < min_percent)
+			min_percent = link_percentage;
+	}
+
+	/*
+	 * The ratio between traffic classes will control the bandwidth
+	 * percentages seen on the wire. To calculate this ratio we use
+	 * a multiplier. It is required that the refill credits must be
+	 * larger than the max frame size so here we find the smallest
+	 * multiplier that will allow all bandwidth percentages to be
+	 * greater than the max frame size.
+	 */
+	min_multiplier = (min_credit / min_percent) + 1;
+
 	/* Find out the link percentage for each TC first */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
 		p = &dcb_config->tc_config[i].path[direction];
@@ -73,8 +101,9 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
 		/* Save link_percentage for reference */
 		p->link_percent = (u8)link_percentage;
 
-		/* Calculate credit refill and save it */
-		credit_refill = link_percentage * MINIMUM_CREDIT_REFILL;
+		/* Calculate credit refill ratio using multiplier */
+		credit_refill = min(link_percentage * min_multiplier,
+				    MAX_CREDIT_REFILL);
 		p->data_credits_refill = (u16)credit_refill;
 
 		/* Calculate maximum credit for the TC */
@@ -85,8 +114,8 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
 		 * of a TC is too small, the maximum credit may not be
 		 * enough to send out a jumbo frame in data plane arbitration.
 		 */
-		if (credit_max && (credit_max < MINIMUM_CREDIT_FOR_JUMBO))
-			credit_max = MINIMUM_CREDIT_FOR_JUMBO;
+		if (credit_max && (credit_max < min_credit))
+			credit_max = min_credit;
 
 		if (direction == DCB_TX_CONFIG) {
 			/*
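For intuition, here is a standalone sketch (not part of the patch) of the refill-credit arithmetic introduced above; the 9000-byte max frame and the 20%/80% traffic-class split are hypothetical, while the two constants mirror the definitions added in this series:

#include <stdio.h>

#define DCB_CREDIT_QUANTUM	64
#define MAX_CREDIT_REFILL	511

int main(void)
{
	int max_frame = 9000;
	int link_percentage[2] = { 20, 80 };
	int min_percent = 20;		/* smallest non-zero link share */
	int min_credit, min_multiplier, i;

	/* half a max frame, rounded up to 64-byte credit quanta: 71 */
	min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
			DCB_CREDIT_QUANTUM;
	/* smallest factor lifting every class above min_credit: 4 */
	min_multiplier = (min_credit / min_percent) + 1;

	for (i = 0; i < 2; i++) {
		int refill = link_percentage[i] * min_multiplier;

		if (refill > MAX_CREDIT_REFILL)
			refill = MAX_CREDIT_REFILL;
		/* prints 80 and 320 -- both at least min_credit (71) */
		printf("TC%d refill = %d\n", i, refill);
	}
	return 0;
}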

@@ -150,15 +150,14 @@ struct ixgbe_dcb_config {
 /* DCB driver APIs */
 
 /* DCB credits calculation */
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, u8);
+s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, int, u8);
 
 /* DCB hw initialization */
 s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
 
 /* DCB definitions for credit calculation */
+#define DCB_CREDIT_QUANTUM	64   /* DCB Quantum */
 #define MAX_CREDIT_REFILL       511  /* 0x1FF * 64B = 32704B */
-#define MINIMUM_CREDIT_REFILL   5    /* 5*64B = 320B */
-#define MINIMUM_CREDIT_FOR_JUMBO 145  /* 145= UpperBound((9*1024+54)/64B) for 9KB jumbo frame */
 #define DCB_MAX_TSO_SIZE        (32*1024) /* MAX TSO packet size supported in DCB mode */
 #define MINIMUM_CREDIT_FOR_TSO  (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */
 #define MAX_CREDIT              4095 /* Maximum credit supported: 256KB * 1204 / 64B */

@@ -397,6 +397,11 @@ static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
 	reg &= ~IXGBE_RTTDCS_ARBDIS;
 	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
 
+	/* Enable Security TX Buffer IFG for DCB */
+	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+	reg |= IXGBE_SECTX_DCB;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+
 	return 0;
 }

@@ -95,6 +95,9 @@
 
 #define IXGBE_TXPBTHRESH_DCB    0xA        /* THRESH value for DCB mode */
 
+/* SECTXMINIFG DCB */
+#define IXGBE_SECTX_DCB		0x00001F00 /* DCB TX Buffer IFG */
+
 /* DCB hardware-specific driver APIs */

@@ -3347,6 +3347,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
 static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 	u32 txdctl;
 	int i, j;
 
@@ -3359,8 +3360,15 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 	if (hw->mac.type == ixgbe_mac_82598EB)
 		netif_set_gso_max_size(adapter->netdev, 32768);
 
-	ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
-	ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
+#ifdef CONFIG_FCOE
+	if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+		max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
+#endif
+
+	ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+					DCB_TX_CONFIG);
+	ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+					DCB_RX_CONFIG);
 
 	/* reconfigure the hardware */
 	ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);

@@ -1077,7 +1077,6 @@ static void __NS8390_init(struct net_device *dev, int startp)
 	ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
 	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
 
-	netif_start_queue(dev);
 	ei_local->tx1 = ei_local->tx2 = 0;
 	ei_local->txing = 0;

@@ -254,19 +254,6 @@ out_free_rq:
 	return err;
 }
 
-static void
-nx_fw_cmd_reset_ctx(struct netxen_adapter *adapter)
-{
-
-	netxen_issue_cmd(adapter, adapter->ahw.pci_func, NXHAL_VERSION,
-			adapter->ahw.pci_func, NX_DESTROY_CTX_RESET, 0,
-			NX_CDRP_CMD_DESTROY_RX_CTX);
-
-	netxen_issue_cmd(adapter, adapter->ahw.pci_func, NXHAL_VERSION,
-			adapter->ahw.pci_func, NX_DESTROY_CTX_RESET, 0,
-			NX_CDRP_CMD_DESTROY_TX_CTX);
-}
-
 static void
 nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
 {
@@ -698,8 +685,6 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
 	if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
 		if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state))
 			goto done;
-		if (reset_devices)
-			nx_fw_cmd_reset_ctx(adapter);
 		err = nx_fw_cmd_create_rx_ctx(adapter);
 		if (err)
 			goto err_out_free;

@@ -1356,6 +1356,13 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		break;
 	}
 
+	if (reset_devices) {
+		if (adapter->portnum == 0) {
+			NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0);
+			adapter->need_fw_reset = 1;
+		}
+	}
+
 	err = netxen_start_firmware(adapter);
 	if (err)
 		goto err_out_decr_ref;

@@ -337,33 +337,19 @@ static int stmmac_init_phy(struct net_device *dev)
 	return 0;
 }
 
-static inline void stmmac_mac_enable_rx(void __iomem *ioaddr)
+static inline void stmmac_enable_mac(void __iomem *ioaddr)
 {
 	u32 value = readl(ioaddr + MAC_CTRL_REG);
-	value |= MAC_RNABLE_RX;
-	/* Set the RE (receive enable bit into the MAC CTRL register). */
-	writel(value, ioaddr + MAC_CTRL_REG);
-}
 
-static inline void stmmac_mac_enable_tx(void __iomem *ioaddr)
-{
-	u32 value = readl(ioaddr + MAC_CTRL_REG);
-	value |= MAC_ENABLE_TX;
-	/* Set the TE (transmit enable bit into the MAC CTRL register). */
+	value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
 	writel(value, ioaddr + MAC_CTRL_REG);
 }
 
-static inline void stmmac_mac_disable_rx(void __iomem *ioaddr)
+static inline void stmmac_disable_mac(void __iomem *ioaddr)
 {
 	u32 value = readl(ioaddr + MAC_CTRL_REG);
-	value &= ~MAC_RNABLE_RX;
-	writel(value, ioaddr + MAC_CTRL_REG);
-}
 
-static inline void stmmac_mac_disable_tx(void __iomem *ioaddr)
-{
-	u32 value = readl(ioaddr + MAC_CTRL_REG);
-	value &= ~MAC_ENABLE_TX;
+	value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
 	writel(value, ioaddr + MAC_CTRL_REG);
 }
@@ -857,8 +843,7 @@ static int stmmac_open(struct net_device *dev)
 	writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
 
 	/* Enable the MAC Rx/Tx */
-	stmmac_mac_enable_rx(priv->ioaddr);
-	stmmac_mac_enable_tx(priv->ioaddr);
+	stmmac_enable_mac(priv->ioaddr);
 
 	/* Set the HW DMA mode and the COE */
 	stmmac_dma_operation_mode(priv);
@@ -928,9 +913,8 @@ static int stmmac_release(struct net_device *dev)
 	/* Release and free the Rx/Tx resources */
 	free_dma_desc_resources(priv);
 
-	/* Disable the MAC core */
-	stmmac_mac_disable_tx(priv->ioaddr);
-	stmmac_mac_disable_rx(priv->ioaddr);
+	/* Disable the MAC Rx/Tx */
+	stmmac_disable_mac(priv->ioaddr);
 
 	netif_carrier_off(dev);
 
@@ -1787,8 +1771,7 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
 	priv->hw->dma->stop_rx(priv->ioaddr);
 	priv->hw->dma->stop_tx(priv->ioaddr);
 
-	stmmac_mac_disable_rx(priv->ioaddr);
-	stmmac_mac_disable_tx(priv->ioaddr);
+	stmmac_disable_mac(priv->ioaddr);
 
 	netif_carrier_off(ndev);
 
@@ -1839,13 +1822,11 @@ static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
 				     dis_ic);
 		priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
 
-		stmmac_mac_disable_tx(priv->ioaddr);
-
 		/* Enable Power down mode by programming the PMT regs */
 		if (device_can_wakeup(priv->device))
 			priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
 		else
-			stmmac_mac_disable_rx(priv->ioaddr);
+			stmmac_disable_mac(priv->ioaddr);
 	} else {
 		priv->shutdown = 1;
 		/* Although this can appear slightly redundant it actually
@@ -1886,8 +1867,7 @@ static int stmmac_resume(struct platform_device *pdev)
 	netif_device_attach(dev);
 
 	/* Enable the MAC and DMA */
-	stmmac_mac_enable_rx(priv->ioaddr);
-	stmmac_mac_enable_tx(priv->ioaddr);
+	stmmac_enable_mac(priv->ioaddr);
 	priv->hw->dma->start_tx(priv->ioaddr);
 	priv->hw->dma->start_rx(priv->ioaddr);

@@ -139,12 +139,12 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
 	/* Fill the ath5k_hw struct with the needed functions */
 	ret = ath5k_hw_init_desc_functions(ah);
 	if (ret)
-		goto err_free;
+		goto err;
 
 	/* Bring device out of sleep and reset its units */
 	ret = ath5k_hw_nic_wakeup(ah, 0, true);
 	if (ret)
-		goto err_free;
+		goto err;
 
 	/* Get MAC, PHY and RADIO revisions */
 	ah->ah_mac_srev = srev;
@@ -234,7 +234,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
 		} else {
 			ATH5K_ERR(sc, "Couldn't identify radio revision.\n");
 			ret = -ENODEV;
-			goto err_free;
+			goto err;
 		}
 	}
 
@@ -244,7 +244,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
 	    (srev < AR5K_SREV_AR2425)) {
 		ATH5K_ERR(sc, "Device not yet supported.\n");
 		ret = -ENODEV;
-		goto err_free;
+		goto err;
 	}
 
 	/*
@@ -252,7 +252,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
 	 */
 	ret = ath5k_hw_post(ah);
 	if (ret)
-		goto err_free;
+		goto err;
 
 	/* Enable pci core retry fix on Hainan (5213A) and later chips */
 	if (srev >= AR5K_SREV_AR5213A)
@@ -265,7 +265,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
 	ret = ath5k_eeprom_init(ah);
 	if (ret) {
 		ATH5K_ERR(sc, "unable to init EEPROM\n");
-		goto err_free;
+		goto err;
 	}
 
 	ee = &ah->ah_capabilities.cap_eeprom;
@@ -307,7 +307,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
 	if (ret) {
 		ATH5K_ERR(sc, "unable to get device capabilities: 0x%04x\n",
 			sc->pdev->device);
-		goto err_free;
+		goto err;
 	}
 
 	/* Crypto settings */
@@ -341,8 +341,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
 	ath5k_hw_set_ledstate(ah, AR5K_LED_INIT);
 
 	return 0;
-err_free:
-	kfree(ah);
+err:
 	return ret;
 }

@@ -310,7 +310,7 @@ struct ath_rx {
 	u8 rxotherant;
 	u32 *rxlink;
 	unsigned int rxfilter;
-	spinlock_t rxflushlock;
+	spinlock_t pcu_lock;
 	spinlock_t rxbuflock;
 	struct list_head rxbuf;
 	struct ath_descdma rxdma;

@@ -801,10 +801,16 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
 	}
 	kfree(buf);
 
-	if ((hif_dev->device_id == 0x7010) || (hif_dev->device_id == 0x7015))
+	switch (hif_dev->device_id) {
+	case 0x7010:
+	case 0x7015:
+	case 0x9018:
 		firm_offset = AR7010_FIRMWARE_TEXT;
-	else
+		break;
+	default:
 		firm_offset = AR9271_FIRMWARE_TEXT;
+		break;
+	}
 
 	/*
 	 * Issue FW download complete command to firmware.

@@ -241,6 +241,9 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
 	 */
 	ath9k_hw_set_interrupts(ah, 0);
 	ath_drain_all_txq(sc, false);
+
+	spin_lock_bh(&sc->rx.pcu_lock);
+
 	stopped = ath_stoprecv(sc);
 
 	/* XXX: do not flush receive queue here. We don't want
@@ -268,6 +271,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
 			  "reset status %d\n",
 			  channel->center_freq, r);
 		spin_unlock_bh(&sc->sc_resetlock);
+		spin_unlock_bh(&sc->rx.pcu_lock);
 		goto ps_restore;
 	}
 	spin_unlock_bh(&sc->sc_resetlock);
@@ -276,9 +280,12 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to restart recv logic\n");
 		r = -EIO;
+		spin_unlock_bh(&sc->rx.pcu_lock);
 		goto ps_restore;
 	}
 
+	spin_unlock_bh(&sc->rx.pcu_lock);
+
 	ath_update_txpow(sc);
 	ath9k_hw_set_interrupts(ah, ah->imask);
@@ -613,7 +620,7 @@ void ath9k_tasklet(unsigned long data)
 	rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
 
 	if (status & rxmask) {
-		spin_lock_bh(&sc->rx.rxflushlock);
+		spin_lock_bh(&sc->rx.pcu_lock);
 
 		/* Check for high priority Rx first */
 		if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
@@ -621,7 +628,7 @@ void ath9k_tasklet(unsigned long data)
 			ath_rx_tasklet(sc, 0, true);
 
 		ath_rx_tasklet(sc, 0, false);
-		spin_unlock_bh(&sc->rx.rxflushlock);
+		spin_unlock_bh(&sc->rx.pcu_lock);
 	}
 
 	if (status & ATH9K_INT_TX) {
@@ -876,6 +883,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
 	if (!ah->curchan)
 		ah->curchan = ath_get_curchannel(sc, sc->hw);
 
+	spin_lock_bh(&sc->rx.pcu_lock);
 	spin_lock_bh(&sc->sc_resetlock);
 	r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
 	if (r) {
@@ -890,8 +898,10 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
 	if (ath_startrecv(sc) != 0) {
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to restart recv logic\n");
+		spin_unlock_bh(&sc->rx.pcu_lock);
 		return;
 	}
+	spin_unlock_bh(&sc->rx.pcu_lock);
 
 	if (sc->sc_flags & SC_OP_BEACONS)
 		ath_beacon_config(sc, NULL);	/* restart beacons */
@@ -930,6 +940,9 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
 	ath9k_hw_set_interrupts(ah, 0);
 
 	ath_drain_all_txq(sc, false);	/* clear pending tx frames */
+
+	spin_lock_bh(&sc->rx.pcu_lock);
+
 	ath_stoprecv(sc);		/* turn off frame recv */
 	ath_flushrecv(sc);		/* flush recv queue */
@@ -947,6 +960,9 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
 	spin_unlock_bh(&sc->sc_resetlock);
 
 	ath9k_hw_phy_disable(ah);
+
+	spin_unlock_bh(&sc->rx.pcu_lock);
+
 	ath9k_hw_configpcipowersave(ah, 1, 1);
 	ath9k_ps_restore(sc);
 	ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
@@ -966,6 +982,9 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
 	ath9k_hw_set_interrupts(ah, 0);
 
 	ath_drain_all_txq(sc, retry_tx);
+
+	spin_lock_bh(&sc->rx.pcu_lock);
+
 	ath_stoprecv(sc);
 	ath_flushrecv(sc);
@@ -980,6 +999,8 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to start recv logic\n");
 
+	spin_unlock_bh(&sc->rx.pcu_lock);
+
 	/*
 	 * We may be doing a reset in response to a request
 	 * that changes the channel so update any state that
@@ -1142,6 +1163,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
 	 * be followed by initialization of the appropriate bits
 	 * and then setup of the interrupt mask.
 	 */
+	spin_lock_bh(&sc->rx.pcu_lock);
 	spin_lock_bh(&sc->sc_resetlock);
 	r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
 	if (r) {
@@ -1150,6 +1172,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
 			  "(freq %u MHz)\n", r,
 			  curchan->center_freq);
 		spin_unlock_bh(&sc->sc_resetlock);
+		spin_unlock_bh(&sc->rx.pcu_lock);
 		goto mutex_unlock;
 	}
 	spin_unlock_bh(&sc->sc_resetlock);
@@ -1171,8 +1194,10 @@ static int ath9k_start(struct ieee80211_hw *hw)
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to start recv logic\n");
 		r = -EIO;
+		spin_unlock_bh(&sc->rx.pcu_lock);
 		goto mutex_unlock;
 	}
+	spin_unlock_bh(&sc->rx.pcu_lock);
 
 	/* Setup our intr mask. */
 	ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL |
@@ -1371,12 +1396,14 @@ static void ath9k_stop(struct ieee80211_hw *hw)
 	 * before setting the invalid flag. */
 	ath9k_hw_set_interrupts(ah, 0);
 
+	spin_lock_bh(&sc->rx.pcu_lock);
 	if (!(sc->sc_flags & SC_OP_INVALID)) {
 		ath_drain_all_txq(sc, false);
 		ath_stoprecv(sc);
 		ath9k_hw_phy_disable(ah);
 	} else
 		sc->rx.rxlink = NULL;
+	spin_unlock_bh(&sc->rx.pcu_lock);
 
 	/* disable HAL and put h/w to sleep */
 	ath9k_hw_disable(ah);

@@ -527,7 +527,7 @@ static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv,
 	for (i = 0; i < rateset->rs_nrates; i++) {
 		for (j = 0; j < rate_table->rate_cnt; j++) {
 			u32 phy = rate_table->info[j].phy;
-			u16 rate_flags = rate_table->info[i].rate_flags;
+			u16 rate_flags = rate_table->info[j].rate_flags;
 			u8 rate = rateset->rs_rates[i];
 			u8 dot11rate = rate_table->info[j].dot11rate;

@@ -297,19 +297,17 @@ static void ath_edma_start_recv(struct ath_softc *sc)
 	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
 			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);
 
-	spin_unlock_bh(&sc->rx.rxbuflock);
-
 	ath_opmode_init(sc);
 
 	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
+
+	spin_unlock_bh(&sc->rx.rxbuflock);
 }
 
 static void ath_edma_stop_recv(struct ath_softc *sc)
 {
-	spin_lock_bh(&sc->rx.rxbuflock);
 	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
 	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
-	spin_unlock_bh(&sc->rx.rxbuflock);
 }
 
 int ath_rx_init(struct ath_softc *sc, int nbufs)
@@ -319,7 +317,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
 	struct ath_buf *bf;
 	int error = 0;
 
-	spin_lock_init(&sc->rx.rxflushlock);
+	spin_lock_init(&sc->rx.pcu_lock);
 	sc->sc_flags &= ~SC_OP_RXFLUSH;
 	spin_lock_init(&sc->rx.rxbuflock);
 
@@ -506,10 +504,11 @@ int ath_startrecv(struct ath_softc *sc)
 	ath9k_hw_rxena(ah);
 
 start_recv:
-	spin_unlock_bh(&sc->rx.rxbuflock);
 	ath_opmode_init(sc);
 	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
 
+	spin_unlock_bh(&sc->rx.rxbuflock);
+
 	return 0;
 }
 
@@ -518,6 +517,7 @@ bool ath_stoprecv(struct ath_softc *sc)
 	struct ath_hw *ah = sc->sc_ah;
 	bool stopped;
 
+	spin_lock_bh(&sc->rx.rxbuflock);
 	ath9k_hw_stoppcurecv(ah);
 	ath9k_hw_setrxfilter(ah, 0);
 	stopped = ath9k_hw_stopdmarecv(ah);
@@ -526,19 +526,18 @@ bool ath_stoprecv(struct ath_softc *sc)
 		ath_edma_stop_recv(sc);
 	else
 		sc->rx.rxlink = NULL;
+	spin_unlock_bh(&sc->rx.rxbuflock);
 
 	return stopped;
 }
 
 void ath_flushrecv(struct ath_softc *sc)
 {
-	spin_lock_bh(&sc->rx.rxflushlock);
 	sc->sc_flags |= SC_OP_RXFLUSH;
 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
 		ath_rx_tasklet(sc, 1, true);
 	ath_rx_tasklet(sc, 1, false);
 	sc->sc_flags &= ~SC_OP_RXFLUSH;
-	spin_unlock_bh(&sc->rx.rxflushlock);
 }
 
 static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)

@@ -1089,15 +1089,6 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
 	txq->axq_tx_inprogress = false;
 	spin_unlock_bh(&txq->axq_lock);
 
-	/* flush any pending frames if aggregation is enabled */
-	if (sc->sc_flags & SC_OP_TXAGGR) {
-		if (!retry_tx) {
-			spin_lock_bh(&txq->axq_lock);
-			ath_txq_drain_pending_buffers(sc, txq);
-			spin_unlock_bh(&txq->axq_lock);
-		}
-	}
-
 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
 		spin_lock_bh(&txq->axq_lock);
 		while (!list_empty(&txq->txq_fifo_pending)) {
@@ -1118,6 +1109,15 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
 		}
 		spin_unlock_bh(&txq->axq_lock);
 	}
+
+	/* flush any pending frames if aggregation is enabled */
+	if (sc->sc_flags & SC_OP_TXAGGR) {
+		if (!retry_tx) {
+			spin_lock_bh(&txq->axq_lock);
+			ath_txq_drain_pending_buffers(sc, txq);
+			spin_unlock_bh(&txq->axq_lock);
+		}
+	}
 }
 
 void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)

@@ -175,7 +175,9 @@ static void b43_sdio_remove(struct sdio_func *func)
 	struct b43_sdio *sdio = sdio_get_drvdata(func);
 
 	ssb_bus_unregister(&sdio->ssb);
+	sdio_claim_host(func);
 	sdio_disable_func(func);
+	sdio_release_host(func);
 	kfree(sdio);
 	sdio_set_drvdata(func, NULL);
 }

@@ -684,18 +684,40 @@ static int if_sdio_prog_firmware(struct if_sdio_card *card)
 	lbs_deb_enter(LBS_DEB_SDIO);
 
+	/*
+	 * Disable interrupts
+	 */
+	sdio_claim_host(card->func);
+	sdio_writeb(card->func, 0x00, IF_SDIO_H_INT_MASK, &ret);
+	sdio_release_host(card->func);
+
 	sdio_claim_host(card->func);
 	scratch = if_sdio_read_scratch(card, &ret);
 	sdio_release_host(card->func);
 
+	lbs_deb_sdio("firmware status = %#x\n", scratch);
+	lbs_deb_sdio("scratch ret = %d\n", ret);
 	if (ret)
 		goto out;
 
-	lbs_deb_sdio("firmware status = %#x\n", scratch);
-
+	/*
+	 * The manual clearly describes that FEDC is the right code to use
+	 * to detect firmware presence, but for SD8686 it is not that simple.
+	 * Scratch is also used to store the RX packet length, so we lose
+	 * the FEDC value early on. So we use a non-zero check in order
+	 * to validate firmware presence.
+	 * Additionally, the SD8686 in the Gumstix always has the high scratch
+	 * bit set, even when the firmware is not loaded. So we have to
+	 * exclude that from the test.
+	 */
 	if (scratch == IF_SDIO_FIRMWARE_OK) {
 		lbs_deb_sdio("firmware already loaded\n");
 		goto success;
+	} else if ((card->model == MODEL_8686) && (scratch & 0x7fff)) {
+		lbs_deb_sdio("firmware may be running\n");
+		goto success;
 	}
 
 	ret = lbs_get_firmware(&card->func->dev, lbs_helper_name, lbs_fw_name,
@@ -709,10 +731,14 @@ static int if_sdio_prog_firmware(struct if_sdio_card *card)
 	if (ret)
 		goto out;
 
+	lbs_deb_sdio("Helper firmware loaded\n");
+
 	ret = if_sdio_prog_real(card, mainfw);
 	if (ret)
 		goto out;
 
+	lbs_deb_sdio("Firmware loaded\n");
+
 success:
 	sdio_claim_host(card->func);
 	sdio_set_block_size(card->func, IF_SDIO_BLOCK_SIZE);
@@ -1042,8 +1068,6 @@ static int if_sdio_probe(struct sdio_func *func,
 	priv->exit_deep_sleep = if_sdio_exit_deep_sleep;
 	priv->reset_deep_sleep_wakeup = if_sdio_reset_deep_sleep_wakeup;
 
-	priv->fw_ready = 1;
-
 	sdio_claim_host(func);
 
 	/*
@@ -1064,6 +1088,8 @@ static int if_sdio_probe(struct sdio_func *func,
 	if (ret)
 		goto reclaim;
 
+	priv->fw_ready = 1;
+
 	/*
 	 * FUNC_INIT is required for SD8688 WLAN/BT multiple functions
 	 */

@@ -797,7 +797,6 @@ int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
 	 * - iff DATA transfer is active, carrier is "on"
 	 * - tx queueing enabled if open *and* carrier is "on"
 	 */
-	netif_stop_queue(net);
 	netif_carrier_off(net);
 
 	dev->gadget = g;
@@ -812,6 +811,7 @@ int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
 		INFO(dev, "MAC %pM\n", net->dev_addr);
 		INFO(dev, "HOST MAC %pM\n", dev->host_mac);
 
+		netif_stop_queue(net);
 		the_dev = dev;
 	}

@@ -462,7 +462,8 @@ struct dccp_ackvec;
  * @dccps_hc_rx_insert_options - receiver wants to add options when acking
  * @dccps_hc_tx_insert_options - sender wants to add options when sending
  * @dccps_server_timewait - server holds timewait state on close (RFC 4340, 8.3)
- * @dccps_xmit_timer - timer for when CCID is not ready to send
+ * @dccps_xmitlet - tasklet scheduled by the TX CCID to dequeue data packets
+ * @dccps_xmit_timer - used by the TX CCID to delay sending (rate-based pacing)
  * @dccps_syn_rtt - RTT sample from Request/Response exchange (in usecs)
  */
 struct dccp_sock {
@@ -502,6 +503,7 @@ struct dccp_sock {
 	__u8				dccps_hc_rx_insert_options:1;
 	__u8				dccps_hc_tx_insert_options:1;
 	__u8				dccps_server_timewait:1;
+	struct tasklet_struct		dccps_xmitlet;
 	struct timer_list		dccps_xmit_timer;
 };

@@ -322,7 +322,7 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
 					  int offset,
 					  unsigned int len, __wsum *csump);
 
-extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
+extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
 extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
 extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
 			     int offset, int len);

@@ -158,6 +158,8 @@ extern int fib_table_flush(struct fib_table *table);
 extern void fib_table_select_default(struct fib_table *table,
 				     const struct flowi *flp,
 				     struct fib_result *res);
+extern void fib_free_table(struct fib_table *tb);
+
 
 #ifndef CONFIG_IP_MULTIPLE_TABLES

@@ -41,10 +41,12 @@ static inline int iov_from_user_compat_to_kern(struct iovec *kiov,
 		compat_size_t len;
 
 		if (get_user(len, &uiov32->iov_len) ||
-		    get_user(buf, &uiov32->iov_base)) {
-			tot_len = -EFAULT;
-			break;
-		}
+		    get_user(buf, &uiov32->iov_base))
+			return -EFAULT;
+
+		if (len > INT_MAX - tot_len)
+			len = INT_MAX - tot_len;
+
 		tot_len += len;
 		kiov->iov_base = compat_ptr(buf);
 		kiov->iov_len = (__kernel_size_t) len;

@@ -35,10 +35,9 @@
  *	in any case.
  */
 
-long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
+int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
 {
-	int size, ct;
-	long err;
+	int size, ct, err;
 
 	if (m->msg_namelen) {
 		if (mode == VERIFY_READ) {
@@ -62,14 +61,13 @@ long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
 	err = 0;
 
 	for (ct = 0; ct < m->msg_iovlen; ct++) {
-		err += iov[ct].iov_len;
-		/*
-		 * Goal is not to verify user data, but to prevent returning
-		 * negative value, which is interpreted as errno.
-		 * Overflow is still possible, but it is harmless.
-		 */
-		if (err < 0)
-			return -EMSGSIZE;
+		size_t len = iov[ct].iov_len;
+
+		if (len > INT_MAX - err) {
+			len = INT_MAX - err;
+			iov[ct].iov_len = len;
+		}
+		err += len;
 	}
 
 	return err;
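A minimal userspace illustration (not kernel code) of the overflow the new clamp prevents: summing user-controlled iovec lengths into a signed int can wrap negative, and callers treat a negative return from verify_iovec() as an errno:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	size_t lens[2] = { (size_t)INT_MAX, 16 };
	int total = 0;
	int i;

	for (i = 0; i < 2; i++) {
		size_t len = lens[i];

		if (len > (size_t)(INT_MAX - total))
			len = INT_MAX - total;	/* the new clamp */
		total += (int)len;
	}
	/* prints 2147483647; without the clamp it would wrap negative */
	printf("total = %d\n", total);
	return 0;
}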

@@ -887,10 +887,11 @@ static ssize_t pktgen_if_write(struct file *file,
 	i += len;
 
 	if (debug) {
-		char tb[count + 1];
-		if (copy_from_user(tb, user_buffer, count))
+		size_t copy = min(count, 1023);
+		char tb[copy + 1];
+		if (copy_from_user(tb, user_buffer, copy))
 			return -EFAULT;
-		tb[count] = 0;
+		tb[copy] = 0;
 		printk(KERN_DEBUG "pktgen: %s,%lu buffer -:%s:-\n", name,
 		       (unsigned long)count, tb);
 	}

@@ -134,13 +134,41 @@ static inline int ccid_get_current_tx_ccid(struct dccp_sock *dp)
 extern void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk);
 extern void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk);
 
+/*
+ * Congestion control of queued data packets via CCID decision.
+ *
+ * The TX CCID performs its congestion-control by indicating whether and when a
+ * queued packet may be sent, using the return code of ccid_hc_tx_send_packet().
+ * The following modes are supported via the symbolic constants below:
+ *  - timer-based pacing    (CCID returns a delay value in milliseconds);
+ *  - autonomous dequeueing (CCID internally schedules dccps_xmitlet).
+ */
+enum ccid_dequeueing_decision {
+	CCID_PACKET_SEND_AT_ONCE =	 0x00000,  /* "green light": no delay */
+	CCID_PACKET_DELAY_MAX =		 0x0FFFF,  /* maximum delay in msecs */
+	CCID_PACKET_DELAY =		 0x10000,  /* CCID msec-delay mode */
+	CCID_PACKET_WILL_DEQUEUE_LATER = 0x20000,  /* CCID autonomous mode */
+	CCID_PACKET_ERR =		 0xF0000,  /* error condition */
+};
+
+static inline int ccid_packet_dequeue_eval(const int return_code)
+{
+	if (return_code < 0)
+		return CCID_PACKET_ERR;
+	if (return_code == 0)
+		return CCID_PACKET_SEND_AT_ONCE;
+	if (return_code <= CCID_PACKET_DELAY_MAX)
+		return CCID_PACKET_DELAY;
+	return return_code;
+}
+
 static inline int ccid_hc_tx_send_packet(struct ccid *ccid, struct sock *sk,
 					 struct sk_buff *skb)
 {
-	int rc = 0;
-
 	if (ccid->ccid_ops->ccid_hc_tx_send_packet != NULL)
-		rc = ccid->ccid_ops->ccid_hc_tx_send_packet(sk, skb);
-	return rc;
+		return ccid->ccid_ops->ccid_hc_tx_send_packet(sk, skb);
+	return CCID_PACKET_SEND_AT_ONCE;
 }
 
 static inline void ccid_hc_tx_packet_sent(struct ccid *ccid, struct sock *sk,
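To make the new convention concrete, a self-contained sketch (illustrative values, not kernel code) of how a caller classifies CCID return codes, mirroring ccid_packet_dequeue_eval() above:

#include <stdio.h>

enum { SEND_AT_ONCE = 0x00000, DELAY_MAX = 0x0FFFF,
       DELAY = 0x10000, WILL_DEQUEUE_LATER = 0x20000, ERR = 0xF0000 };

static int dequeue_eval(int rc)
{
	if (rc < 0)
		return ERR;
	if (rc == 0)
		return SEND_AT_ONCE;
	if (rc <= DELAY_MAX)
		return DELAY;
	return rc;
}

int main(void)
{
	int samples[4] = { 0, 100, WILL_DEQUEUE_LATER, -22 };
	int i;

	for (i = 0; i < 4; i++)
		printf("rc=%-7d -> 0x%05x\n", samples[i],
		       dequeue_eval(samples[i]));
	/* 0 -> 0x00000 (send now), 100 -> 0x10000 (pace by 100 msec,
	 * the caller keeps the raw rc for the timer), 0x20000 -> itself
	 * (CCID dequeues on its own), -22 -> 0xf0000 (drop the skb) */
	return 0;
}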

@@ -78,12 +78,9 @@ static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc)
 
 static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
 {
-	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
-
-	if (hc->tx_pipe < hc->tx_cwnd)
-		return 0;
-
-	return 1; /* XXX CCID should dequeue when ready instead of polling */
+	if (ccid2_cwnd_network_limited(ccid2_hc_tx_sk(sk)))
+		return CCID_PACKET_WILL_DEQUEUE_LATER;
+	return CCID_PACKET_SEND_AT_ONCE;
 }
 
 static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
@@ -115,6 +112,7 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
 {
 	struct sock *sk = (struct sock *)data;
 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
+	const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk)) {
@@ -129,8 +127,6 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
 	if (hc->tx_rto > DCCP_RTO_MAX)
 		hc->tx_rto = DCCP_RTO_MAX;
 
-	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
-
 	/* adjust pipe, cwnd etc */
 	hc->tx_ssthresh = hc->tx_cwnd / 2;
 	if (hc->tx_ssthresh < 2)
@@ -146,6 +142,12 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
 	hc->tx_rpseq    = 0;
 	hc->tx_rpdupack = -1;
 	ccid2_change_l_ack_ratio(sk, 1);
+
+	/* if we were blocked before, we may now send cwnd=1 packet */
+	if (sender_was_blocked)
+		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+	/* restart backed-off timer */
+	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
 out:
 	bh_unlock_sock(sk);
 	sock_put(sk);
@@ -434,6 +436,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
+	const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);
 	u64 ackno, seqno;
 	struct ccid2_seq *seqp;
 	unsigned char *vector;
@@ -631,6 +634,10 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 		sk_stop_timer(sk, &hc->tx_rtotimer);
 	else
 		sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
+
+	/* check if incoming Acks allow pending packets to be sent */
+	if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
+		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
 }
 
 static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)

@@ -81,6 +81,11 @@ struct ccid2_hc_tx_sock {
 	u64			tx_high_ack;
 };
 
+static inline bool ccid2_cwnd_network_limited(struct ccid2_hc_tx_sock *hc)
+{
+	return hc->tx_pipe >= hc->tx_cwnd;
+}
+
 struct ccid2_hc_rx_sock {
 	int	rx_data;
 };

@@ -268,11 +268,11 @@ out:
 	sock_put(sk);
 }
 
-/*
- * returns
- *   > 0: delay (in msecs) that should pass before actually sending
- *   = 0: can send immediately
- *   < 0: error condition; do not send packet
+/**
+ * ccid3_hc_tx_send_packet  -  Delay-based dequeueing of TX packets
+ * @skb: next packet candidate to send on @sk
+ * This function uses the convention of ccid_packet_dequeue_eval() and
+ * returns a millisecond-delay value between 0 and t_mbi = 64000 msec.
  */
 static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
 {
@@ -348,7 +348,7 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
 
 	/* set the nominal send time for the next following packet */
 	hc->tx_t_nom = ktime_add_us(hc->tx_t_nom, hc->tx_t_ipi);
-	return 0;
+	return CCID_PACKET_SEND_AT_ONCE;
 }
 
 static void ccid3_hc_tx_packet_sent(struct sock *sk, unsigned int len)

@@ -243,8 +243,9 @@ extern void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 extern void dccp_send_sync(struct sock *sk, const u64 seq,
 			   const enum dccp_pkt_type pkt_type);
 
-extern void dccp_write_xmit(struct sock *sk, int block);
+extern void   dccp_write_xmit(struct sock *sk);
 extern void   dccp_write_space(struct sock *sk);
+extern void   dccp_flush_write_queue(struct sock *sk, long *time_budget);
 
 extern void dccp_init_xmit_timers(struct sock *sk);
 static inline void dccp_clear_xmit_timers(struct sock *sk)

@ -209,108 +209,150 @@ void dccp_write_space(struct sock *sk)
} }
/** /**
* dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet * dccp_wait_for_ccid - Await CCID send permission
* @sk: socket to wait for * @sk: socket to wait for
* @skb: current skb to pass on for waiting * @delay: timeout in jiffies
* @delay: sleep timeout in milliseconds (> 0) * This is used by CCIDs which need to delay the send time in process context.
* This function is called by default when the socket is closed, and
* when a non-zero linger time is set on the socket. For consistency
*/ */
static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay) static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
{ {
struct dccp_sock *dp = dccp_sk(sk);
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
unsigned long jiffdelay; long remaining;
int rc;
do { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
dccp_pr_debug("delayed send by %d msec\n", delay); sk->sk_write_pending++;
jiffdelay = msecs_to_jiffies(delay); release_sock(sk);
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); remaining = schedule_timeout(delay);
sk->sk_write_pending++; lock_sock(sk);
release_sock(sk); sk->sk_write_pending--;
schedule_timeout(jiffdelay);
lock_sock(sk);
sk->sk_write_pending--;
if (sk->sk_err)
goto do_error;
if (signal_pending(current))
goto do_interrupted;
rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
} while ((delay = rc) > 0);
out:
finish_wait(sk_sleep(sk), &wait); finish_wait(sk_sleep(sk), &wait);
return rc;
do_error: if (signal_pending(current) || sk->sk_err)
rc = -EPIPE; return -1;
goto out; return remaining;
do_interrupted:
rc = -EINTR;
goto out;
} }
-void dccp_write_xmit(struct sock *sk, int block)
+/**
+ * dccp_xmit_packet  -  Send data packet under control of CCID
+ * Transmits next-queued payload and informs CCID to account for the packet.
+ */
+static void dccp_xmit_packet(struct sock *sk)
+{
+	int err, len;
+	struct dccp_sock *dp = dccp_sk(sk);
+	struct sk_buff *skb = skb_dequeue(&sk->sk_write_queue);
+
+	if (unlikely(skb == NULL))
+		return;
+	len = skb->len;
+
+	if (sk->sk_state == DCCP_PARTOPEN) {
+		const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
+		/*
+		 * See 8.1.5 - Handshake Completion.
+		 *
+		 * For robustness we resend Confirm options until the client has
+		 * entered OPEN. During the initial feature negotiation, the MPS
+		 * is smaller than usual, reduced by the Change/Confirm options.
+		 */
+		if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
+			DCCP_WARN("Payload too large (%d) for featneg.\n", len);
+			dccp_send_ack(sk);
+			dccp_feat_list_purge(&dp->dccps_featneg);
+		}
+
+		inet_csk_schedule_ack(sk);
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+					  inet_csk(sk)->icsk_rto,
+					  DCCP_RTO_MAX);
+		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
+	} else if (dccp_ack_pending(sk)) {
+		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
+	} else {
+		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA;
+	}
+
+	err = dccp_transmit_skb(sk, skb);
+	if (err)
+		dccp_pr_debug("transmit_skb() returned err=%d\n", err);
+	/*
+	 * Register this one as sent even if an error occurred. To the remote
+	 * end a local packet drop is indistinguishable from network loss, i.e.
+	 * any local drop will eventually be reported via receiver feedback.
+	 */
+	ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);
+}
+
+/**
+ * dccp_flush_write_queue  -  Drain queue at end of connection
+ * Since dccp_sendmsg queues packets without waiting for them to be sent, it may
+ * happen that the TX queue is not empty at the end of a connection. We give the
+ * HC-sender CCID a grace period of up to @time_budget jiffies. If this function
+ * returns with a non-empty write queue, it will be purged later.
+ */
+void dccp_flush_write_queue(struct sock *sk, long *time_budget)
+{
+	struct dccp_sock *dp = dccp_sk(sk);
+	struct sk_buff *skb;
+	long delay, rc;
+
+	while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) {
+		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
+
+		switch (ccid_packet_dequeue_eval(rc)) {
+		case CCID_PACKET_WILL_DEQUEUE_LATER:
+			/*
+			 * If the CCID determines when to send, the next sending
+			 * time is unknown or the CCID may not even send again
+			 * (e.g. remote host crashes or lost Ack packets).
+			 */
+			DCCP_WARN("CCID did not manage to send all packets\n");
+			return;
+		case CCID_PACKET_DELAY:
+			delay = msecs_to_jiffies(rc);
+			if (delay > *time_budget)
+				return;
+			rc = dccp_wait_for_ccid(sk, delay);
+			if (rc < 0)
+				return;
+			*time_budget -= (delay - rc);
+			/* check again if we can send now */
+			break;
+		case CCID_PACKET_SEND_AT_ONCE:
+			dccp_xmit_packet(sk);
+			break;
+		case CCID_PACKET_ERR:
+			skb_dequeue(&sk->sk_write_queue);
+			kfree_skb(skb);
+			dccp_pr_debug("packet discarded due to err=%ld\n", rc);
+		}
+	}
+}
+
+void dccp_write_xmit(struct sock *sk)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 	struct sk_buff *skb;
 
 	while ((skb = skb_peek(&sk->sk_write_queue))) {
-		int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
+		int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
 
-		if (err > 0) {
-			if (!block) {
-				sk_reset_timer(sk, &dp->dccps_xmit_timer,
-						msecs_to_jiffies(err)+jiffies);
-				break;
-			} else
-				err = dccp_wait_for_ccid(sk, skb, err);
-			if (err && err != -EINTR)
-				DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
-		}
-
-		skb_dequeue(&sk->sk_write_queue);
-		if (err == 0) {
-			struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
-			const int len = skb->len;
-
-			if (sk->sk_state == DCCP_PARTOPEN) {
-				const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
-				/*
-				 * See 8.1.5 - Handshake Completion.
-				 *
-				 * For robustness we resend Confirm options until the client has
-				 * entered OPEN. During the initial feature negotiation, the MPS
-				 * is smaller than usual, reduced by the Change/Confirm options.
-				 */
-				if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
-					DCCP_WARN("Payload too large (%d) for featneg.\n", len);
-					dccp_send_ack(sk);
-					dccp_feat_list_purge(&dp->dccps_featneg);
-				}
-
-				inet_csk_schedule_ack(sk);
-				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
-							  inet_csk(sk)->icsk_rto,
-							  DCCP_RTO_MAX);
-				dcb->dccpd_type = DCCP_PKT_DATAACK;
-			} else if (dccp_ack_pending(sk))
-				dcb->dccpd_type = DCCP_PKT_DATAACK;
-			else
-				dcb->dccpd_type = DCCP_PKT_DATA;
-
-			err = dccp_transmit_skb(sk, skb);
-			ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);
-			if (err)
-				DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
-					 err);
-		} else {
-			dccp_pr_debug("packet discarded due to err=%d\n", err);
-			kfree_skb(skb);
-		}
+		switch (ccid_packet_dequeue_eval(rc)) {
+		case CCID_PACKET_WILL_DEQUEUE_LATER:
+			return;
+		case CCID_PACKET_DELAY:
+			sk_reset_timer(sk, &dp->dccps_xmit_timer,
+				       jiffies + msecs_to_jiffies(rc));
+			return;
+		case CCID_PACKET_SEND_AT_ONCE:
+			dccp_xmit_packet(sk);
+			break;
+		case CCID_PACKET_ERR:
+			skb_dequeue(&sk->sk_write_queue);
+			kfree_skb(skb);
+			dccp_pr_debug("packet discarded due to err=%d\n", rc);
+		}
 	}
 }
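
Both TX paths above dispatch on ccid_packet_dequeue_eval(), which is not part of this excerpt (it belongs to the CCID interface header). As a rough, non-authoritative orientation: the evaluator folds the raw return code of ccid_hc_tx_send_packet() into the four cases handled by the switch statements. The sketch below illustrates the idea; the numeric constants and the 0xFFFF delay cut-off are assumptions, not the verbatim kernel definitions.

	/* Illustrative sketch only -- names match the cases used above,
	 * but the values are assumed for the sake of the example. */
	enum ccid_dequeueing_decision {
		CCID_PACKET_SEND_AT_ONCE       = 0x00000, /* "green light": send now     */
		CCID_PACKET_DELAY              = 0x10000, /* "yellow": retry in rc msecs */
		CCID_PACKET_WILL_DEQUEUE_LATER = 0x20000, /* CCID dequeues via tasklet   */
		CCID_PACKET_ERR                = 0xF0000, /* error: drop this packet     */
	};

	static inline int ccid_packet_dequeue_eval(const int return_code)
	{
		if (return_code < 0)            /* CCID signalled an error        */
			return CCID_PACKET_ERR;
		if (return_code == 0)           /* clear to send immediately      */
			return CCID_PACKET_SEND_AT_ONCE;
		if (return_code <= 0xFFFF)      /* small positive: delay in msecs */
			return CCID_PACKET_DELAY;
		return return_code;             /* already a dequeueing decision  */
	}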
@@ -622,7 +664,6 @@ void dccp_send_close(struct sock *sk, const int active)
 	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;
 
 	if (active) {
-		dccp_write_xmit(sk, 1);
 		dccp_skb_entail(sk, skb);
 		dccp_transmit_skb(sk, skb_clone(skb, prio));
 		/*


@@ -726,7 +726,13 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		goto out_discard;
 
 	skb_queue_tail(&sk->sk_write_queue, skb);
-	dccp_write_xmit(sk,0);
+	/*
+	 * The xmit_timer is set if the TX CCID is rate-based and will expire
+	 * when congestion control permits to release further packets into the
+	 * network. Window-based CCIDs do not use this timer.
+	 */
+	if (!timer_pending(&dp->dccps_xmit_timer))
+		dccp_write_xmit(sk);
 out_release:
 	release_sock(sk);
 	return rc ? : len;
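
For context: the rate-based CCID-3 (TFRC-style) is the one that answers with a delay and arms dccps_xmit_timer, so sendmsg() must not dequeue behind the timer's back. The window-based CCID-2 behaves like TCP: it either sends at once or defers to its own Ack processing via CCID_PACKET_WILL_DEQUEUE_LATER, and never uses this timer.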
@@ -951,9 +957,22 @@ void dccp_close(struct sock *sk, long timeout)
 		/* Check zero linger _after_ checking for unread data. */
 		sk->sk_prot->disconnect(sk, 0);
 	} else if (sk->sk_state != DCCP_CLOSED) {
+		/*
+		 * Normal connection termination. May need to wait if there are
+		 * still packets in the TX queue that are delayed by the CCID.
+		 */
+		dccp_flush_write_queue(sk, &timeout);
 		dccp_terminate_connection(sk);
 	}
 
+	/*
+	 * Flush write queue. This may be necessary in several cases:
+	 * - we have been closed by the peer but still have application data;
+	 * - abortive termination (unread data or zero linger time),
+	 * - normal termination but queue could not be flushed within time limit
+	 */
+	__skb_queue_purge(&sk->sk_write_queue);
+
 	sk_stream_wait_close(sk, timeout);
 
 adjudge_to_death:


@@ -237,32 +237,35 @@ out:
 	sock_put(sk);
 }
 
-/* Transmit-delay timer: used by the CCIDs to delay actual send time */
-static void dccp_write_xmit_timer(unsigned long data)
+/**
+ * dccp_write_xmitlet  -  Workhorse for CCID packet dequeueing interface
+ * See the comments above %ccid_dequeueing_decision for supported modes.
+ */
+static void dccp_write_xmitlet(unsigned long data)
 {
 	struct sock *sk = (struct sock *)data;
-	struct dccp_sock *dp = dccp_sk(sk);
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk))
-		sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies+1);
+		sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer, jiffies + 1);
 	else
-		dccp_write_xmit(sk, 0);
+		dccp_write_xmit(sk);
 	bh_unlock_sock(sk);
-	sock_put(sk);
 }
 
-static void dccp_init_write_xmit_timer(struct sock *sk)
+static void dccp_write_xmit_timer(unsigned long data)
 {
-	struct dccp_sock *dp = dccp_sk(sk);
-
-	setup_timer(&dp->dccps_xmit_timer, dccp_write_xmit_timer,
-			(unsigned long)sk);
+	dccp_write_xmitlet(data);
+	sock_put((struct sock *)data);
 }
 
 void dccp_init_xmit_timers(struct sock *sk)
 {
-	dccp_init_write_xmit_timer(sk);
+	struct dccp_sock *dp = dccp_sk(sk);
+
+	tasklet_init(&dp->dccps_xmitlet, dccp_write_xmitlet, (unsigned long)sk);
+	setup_timer(&dp->dccps_xmit_timer, dccp_write_xmit_timer,
+		    (unsigned long)sk);
 	inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
 				  &dccp_keepalive_timer);
 }
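
The timer callback keeps the sock_put() because it pairs with the reference taken by sk_reset_timer() when the timer is armed; the tasklet runs without a reference of its own and so must not drop one. For orientation, a CCID that returned CCID_PACKET_WILL_DEQUEUE_LATER can restart TX from softirq context roughly as follows (sender_was_blocked is a hypothetical flag, not taken from this diff):

	/* Hedged sketch: resume dequeueing once feedback, e.g. an
	 * incoming Ack, makes sending possible again. */
	if (sender_was_blocked)
		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);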


@@ -1052,7 +1052,7 @@ static void ip_fib_net_exit(struct net *net)
 		hlist_for_each_entry_safe(tb, node, tmp, head, tb_hlist) {
 			hlist_del(node);
 			fib_table_flush(tb);
-			kfree(tb);
+			fib_free_table(tb);
 		}
 	}
 	kfree(net->ipv4.fib_table_hash);
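
The leak fixed here: fib_table_flush() empties the table, but a bare kfree(tb) never released the per-zone bookkeeping that the hash backend hangs off tb->tb_data, so every namespace teardown leaked each zone and any separately allocated hash array. The fib_free_table() implementations added below let each backend free its own private state.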


@@ -716,6 +716,24 @@ int fib_table_flush(struct fib_table *tb)
 	return found;
 }
 
+void fib_free_table(struct fib_table *tb)
+{
+	struct fn_hash *table = (struct fn_hash *) tb->tb_data;
+	struct fn_zone *fz, *next;
+
+	next = table->fn_zone_list;
+	while (next != NULL) {
+		fz = next;
+		next = fz->fz_next;
+
+		if (fz->fz_hash != fz->fz_embedded_hash)
+			fz_hash_free(fz->fz_hash, fz->fz_divisor);
+
+		kfree(fz);
+	}
+
+	kfree(tb);
+}
+
 static inline int
 fn_hash_dump_bucket(struct sk_buff *skb, struct netlink_callback *cb,


@@ -1797,6 +1797,11 @@ int fib_table_flush(struct fib_table *tb)
 	return found;
 }
 
+void fib_free_table(struct fib_table *tb)
+{
+	kfree(tb);
+}
+
 void fib_table_select_default(struct fib_table *tb,
 			      const struct flowi *flp,
 			      struct fib_result *res)
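
The trie backend's version is deliberately trivial: its fib_table_flush() already returns the table to an empty state and the trie nodes are reclaimed separately, so only the fib_table wrapper itself remains to be freed. Only the hash backend carries the extra zone state that needs the explicit walk shown above.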


@@ -203,9 +203,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
 			    size_t count, loff_t *ppos)
 {
 	struct ieee80211_key *key = file->private_data;
-	int i, res, bufsize = 2 * key->conf.keylen + 2;
+	int i, bufsize = 2 * key->conf.keylen + 2;
 	char *buf = kmalloc(bufsize, GFP_KERNEL);
 	char *p = buf;
+	ssize_t res;
+
+	if (!buf)
+		return -ENOMEM;
 
 	for (i = 0; i < key->conf.keylen; i++)
 		p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);


@@ -677,10 +677,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 	/*
 	 * Calculate scan IE length -- we need this to alloc
 	 * memory and to subtract from the driver limit. It
-	 * includes the (extended) supported rates and HT
+	 * includes the DS Params, (extended) supported rates, and HT
 	 * information -- SSID is the driver's responsibility.
 	 */
-	local->scan_ies_len = 4 + max_bitrates; /* (ext) supp rates */
+	local->scan_ies_len = 4 + max_bitrates /* (ext) supp rates */ +
+			      3 /* DS Params */;
 	if (supp_ht)
 		local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap);
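
The arithmetic: each information element costs a 2-byte header (ID, length). Supported Rates carries at most 8 rates, with the rest spilling into Extended Supported Rates, so the two headers plus one byte per rate give 4 + max_bitrates. DS Parameter Set is a 2-byte header plus the 1-byte current channel, the 3 bytes previously missing from the budget.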


@@ -195,7 +195,7 @@ socket_mt4_v1(const struct sk_buff *skb, struct xt_action_param *par)
 static int
 extract_icmp6_fields(const struct sk_buff *skb,
 		     unsigned int outside_hdrlen,
-		     u8 *protocol,
+		     int *protocol,
 		     struct in6_addr **raddr,
 		     struct in6_addr **laddr,
 		     __be16 *rport,
@@ -252,8 +252,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
 	struct sock *sk;
 	struct in6_addr *daddr, *saddr;
 	__be16 dport, sport;
-	int thoff;
-	u8 tproto;
+	int thoff, tproto;
 	const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
 
 	tproto = ipv6_find_hdr(skb, &thoff, -1, NULL);
@@ -305,7 +304,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
 		sk = NULL;
 	}
 
-	pr_debug("proto %hhu %pI6:%hu -> %pI6:%hu "
+	pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu "
		 "(orig %pI6:%hu) sock %p\n",
		 tproto, saddr, ntohs(sport),
		 daddr, ntohs(dport),
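
Why the signedness matters: ipv6_find_hdr() returns the transport protocol number on success but a negative errno on failure, and a u8 silently folds that error into a valid-looking protocol value, so a subsequent "tproto < 0" check can never fire. A minimal user-space illustration of the same conversion (the -93 merely stands in for a negative errno such as -EPROTONOSUPPORT):

	#include <stdio.h>

	typedef unsigned char u8;

	/* Stand-in for a helper that reports errors as negative values. */
	static int find_hdr_stub(void)
	{
		return -93;	/* e.g. -EPROTONOSUPPORT */
	}

	int main(void)
	{
		u8  as_u8  = find_hdr_stub();	/* -93 wraps to 163 */
		int as_int = find_hdr_stub();

		printf("u8:  value=%u  detected as error: %s\n",
		       as_u8,  (int)as_u8 < 0 ? "yes" : "no");
		printf("int: value=%d  detected as error: %s\n",
		       as_int, as_int < 0 ? "yes" : "no");
		return 0;
	}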