Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Resolved logic conflicts causing a build failure due to drivers/net/r8169.c
changes, using a patch from Stephen Rothwell.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 2bd93d7af1
@@ -393,8 +393,8 @@ typedef struct fec {
 	uint fec_addr_low;		/* lower 32 bits of station address */
 	ushort fec_addr_high;		/* upper 16 bits of station address */
 	ushort res1;			/* reserved */
-	uint fec_hash_table_high;	/* upper 32-bits of hash table */
-	uint fec_hash_table_low;	/* lower 32-bits of hash table */
+	uint fec_grp_hash_table_high;	/* upper 32-bits of hash table */
+	uint fec_grp_hash_table_low;	/* lower 32-bits of hash table */
 	uint fec_r_des_start;		/* beginning of Rx descriptor ring */
 	uint fec_x_des_start;		/* beginning of Tx descriptor ring */
 	uint fec_r_buff_size;		/* Rx buffer size */
@@ -566,9 +566,9 @@ struct atl1c_adapter {
 #define __AT_TESTING 0x0001
 #define __AT_RESETTING 0x0002
 #define __AT_DOWN 0x0003
-	u8 work_event;
-#define ATL1C_WORK_EVENT_RESET 0x01
-#define ATL1C_WORK_EVENT_LINK_CHANGE 0x02
+	unsigned long work_event;
+#define ATL1C_WORK_EVENT_RESET 0
+#define ATL1C_WORK_EVENT_LINK_CHANGE 1
 	u32 msg_enable;
 
 	bool have_msi;
@@ -325,7 +325,7 @@ static void atl1c_link_chg_event(struct atl1c_adapter *adapter)
 		}
 	}
 
-	adapter->work_event |= ATL1C_WORK_EVENT_LINK_CHANGE;
+	set_bit(ATL1C_WORK_EVENT_LINK_CHANGE, &adapter->work_event);
 	schedule_work(&adapter->common_task);
 }
 
@@ -337,20 +337,16 @@ static void atl1c_common_task(struct work_struct *work)
 	adapter = container_of(work, struct atl1c_adapter, common_task);
 	netdev = adapter->netdev;
 
-	if (adapter->work_event & ATL1C_WORK_EVENT_RESET) {
-		adapter->work_event &= ~ATL1C_WORK_EVENT_RESET;
+	if (test_and_clear_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event)) {
 		netif_device_detach(netdev);
 		atl1c_down(adapter);
 		atl1c_up(adapter);
 		netif_device_attach(netdev);
 		return;
 	}
 
-	if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE) {
-		adapter->work_event &= ~ATL1C_WORK_EVENT_LINK_CHANGE;
+	if (test_and_clear_bit(ATL1C_WORK_EVENT_LINK_CHANGE,
+			       &adapter->work_event))
 		atl1c_check_link_status(adapter);
-	}
-
-	return;
 }
 
@@ -369,7 +365,7 @@ static void atl1c_tx_timeout(struct net_device *netdev)
 	struct atl1c_adapter *adapter = netdev_priv(netdev);
 
 	/* Do the reset outside of interrupt context */
-	adapter->work_event |= ATL1C_WORK_EVENT_RESET;
+	set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event);
 	schedule_work(&adapter->common_task);
 }
 
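The atl1c hunks above replace a plain work_event byte and its non-atomic read-modify-write updates with set_bit()/test_and_clear_bit() on an unsigned long, so an interrupt posting an event can never race with the work handler clearing it. Below is a minimal userspace sketch of the same idea using C11 atomics; the kernel bitops, the adapter structure and the workqueue machinery are deliberately not reproduced.

#include <stdatomic.h>
#include <stdio.h>

#define EVENT_RESET        0
#define EVENT_LINK_CHANGE  1

static atomic_ulong work_event = 0;	/* one bit per pending event */

/* Producer side (e.g. an interrupt handler): mark an event as pending. */
static void post_event(int bit)
{
	atomic_fetch_or(&work_event, 1UL << bit);
}

/* Consumer side (the work handler): atomically consume one event bit.
 * The bit is tested and cleared in a single step, so a concurrent
 * post_event() can never be lost or handled twice. */
static int take_event(int bit)
{
	unsigned long mask = 1UL << bit;

	return (atomic_fetch_and(&work_event, ~mask) & mask) != 0;
}

int main(void)
{
	post_event(EVENT_LINK_CHANGE);

	if (take_event(EVENT_RESET))
		printf("handle reset\n");
	if (take_event(EVENT_LINK_CHANGE))
		printf("handle link change\n");
	return 0;
}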
@@ -1907,6 +1907,7 @@ static void be_worker(struct work_struct *work)
 	}
 
 reschedule:
 	adapter->work_counter++;
 	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
 }
 
@@ -2019,15 +2019,23 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
 static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
 	u32 *parsing_data, u32 xmit_type)
 {
-	*parsing_data |= ((tcp_hdrlen(skb)/4) <<
-		ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
-		ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
-	*parsing_data |=
-		((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
-		ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
-		ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
+	*parsing_data |= ((((u8 *)tcp_hdr(skb) - skb->data) / 2) <<
+		ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
+		ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
+
+	if (xmit_type & XMIT_CSUM_TCP) {
+		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
+			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
 
-	return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
+		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
+	} else
+		/* We support checksum offload for TCP and UDP only.
+		 * No need to pass the UDP header length - it's a constant.
+		 */
+		return skb_transport_header(skb) +
+			sizeof(struct udphdr) - skb->data;
 }
 
 /**
@@ -2043,7 +2051,7 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
 	struct eth_tx_parse_bd_e1x *pbd,
 	u32 xmit_type)
 {
-	u8 hlen = (skb_network_header(skb) - skb->data) / 2;
+	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
 
 	/* for now NS flag is not used in Linux */
 	pbd->global_data =
@@ -2051,9 +2059,15 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
 		ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
 
 	pbd->ip_hlen_w = (skb_transport_header(skb) -
-		skb_network_header(skb)) / 2;
+		skb_network_header(skb)) >> 1;
 
-	hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
+	hlen += pbd->ip_hlen_w;
+
+	/* We support checksum offload for TCP and UDP only */
+	if (xmit_type & XMIT_CSUM_TCP)
+		hlen += tcp_hdrlen(skb) / 2;
+	else
+		hlen += sizeof(struct udphdr) / 2;
 
 	pbd->total_hlen_w = cpu_to_le16(hlen);
 	hlen = hlen*2;
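The bnx2x hunks stop assuming every checksummed packet is TCP: the parsing descriptor now gets the TCP header length only for XMIT_CSUM_TCP, and UDP packets use the fixed 8-byte UDP header instead. All lengths are expressed in 16-bit words, hence the '>> 1' and '/ 2'. The following hedged userspace sketch only illustrates that word arithmetic; the struct and values are invented and are not the driver's real descriptor layout.

#include <stdio.h>

#define XMIT_CSUM_TCP 0x1

/* Byte lengths of the headers of a hypothetical packet. */
struct pkt_layout {
	unsigned int mac_hdr_len;	/* e.g. 14 for Ethernet */
	unsigned int ip_hdr_len;	/* e.g. 20 for IPv4 without options */
	unsigned int tcp_hdr_len;	/* e.g. 20 without TCP options */
};

/* Return the total header length in 16-bit words, the unit the parsing
 * block expects: MAC + IP + (TCP header, or the fixed UDP header). */
static unsigned int total_hlen_words(const struct pkt_layout *p,
				     unsigned int xmit_type)
{
	unsigned int hlen = p->mac_hdr_len >> 1;	/* bytes -> words */

	hlen += p->ip_hdr_len >> 1;

	if (xmit_type & XMIT_CSUM_TCP)
		hlen += p->tcp_hdr_len / 2;
	else
		hlen += 8 / 2;		/* a UDP header is always 8 bytes */

	return hlen;
}

int main(void)
{
	struct pkt_layout pkt = { 14, 20, 20 };

	printf("TCP headers: %u words\n", total_hlen_words(&pkt, XMIT_CSUM_TCP));
	printf("UDP headers: %u words\n", total_hlen_words(&pkt, 0));
	return 0;
}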
@@ -1480,8 +1480,11 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
 
 static int agg_device_up(const struct aggregator *agg)
 {
-	return (netif_running(agg->slave->dev) &&
-		netif_carrier_ok(agg->slave->dev));
+	struct port *port = agg->lag_ports;
+	if (!port)
+		return 0;
+	return (netif_running(port->slave->dev) &&
+		netif_carrier_ok(port->slave->dev));
 }
 
 /**
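The bonding fix reaches the slave through the aggregator's first port and reports "down" when the aggregator no longer has any ports, instead of dereferencing a stale pointer. A small hedged sketch of that guard with made-up structures:

#include <stdio.h>
#include <stddef.h>

struct slave      { int running; int carrier_ok; };
struct port       { struct slave *slave; };
struct aggregator { struct port *lag_ports; };	/* first port, or NULL */

/* An aggregator that has lost all of its ports must simply report
 * "not up" rather than chase a pointer that is no longer valid. */
static int agg_device_up(const struct aggregator *agg)
{
	const struct port *port = agg->lag_ports;

	if (!port)
		return 0;
	return port->slave->running && port->slave->carrier_ok;
}

int main(void)
{
	struct slave s = { 1, 1 };
	struct port p = { &s };
	struct aggregator with_port = { &p };
	struct aggregator empty = { NULL };

	printf("with port: %d\n", agg_device_up(&with_port));	/* 1 */
	printf("empty:     %d\n", agg_device_up(&empty));	/* 0 */
	return 0;
}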
@@ -3040,11 +3040,14 @@ static void ehea_rereg_mrs(void)
 
 			if (dev->flags & IFF_UP) {
 				mutex_lock(&port->port_lock);
-				port_napi_enable(port);
 				ret = ehea_restart_qps(dev);
-				check_sqs(port);
-				if (!ret)
+				if (!ret) {
+					check_sqs(port);
+					port_napi_enable(port);
 					netif_wake_queue(dev);
+				} else {
+					netdev_err(dev, "Unable to restart QPS\n");
+				}
 				mutex_unlock(&port->port_lock);
 			}
 		}
@@ -226,8 +226,8 @@ static void set_multicast_finish(struct net_device *dev)
 	}
 
 	FC(fecp, r_cntrl, FEC_RCNTRL_PROM);
-	FW(fecp, hash_table_high, fep->fec.hthi);
-	FW(fecp, hash_table_low, fep->fec.htlo);
+	FW(fecp, grp_hash_table_high, fep->fec.hthi);
+	FW(fecp, grp_hash_table_low, fep->fec.htlo);
 }
 
 static void set_multicast_list(struct net_device *dev)
@@ -273,8 +273,8 @@ static void restart(struct net_device *dev)
 	/*
 	 * Reset all multicast.
 	 */
-	FW(fecp, hash_table_high, fep->fec.hthi);
-	FW(fecp, hash_table_low, fep->fec.htlo);
+	FW(fecp, grp_hash_table_high, fep->fec.hthi);
+	FW(fecp, grp_hash_table_low, fep->fec.htlo);
 
 	/*
 	 * Set maximum receive buffer size.
@@ -671,6 +671,7 @@ static int netconsole_netdev_event(struct notifier_block *this,
 		goto done;
 
 	spin_lock_irqsave(&target_list_lock, flags);
+restart:
 	list_for_each_entry(nt, &target_list, list) {
 		netconsole_target_get(nt);
 		if (nt->np.dev == dev) {
@@ -683,9 +684,16 @@ static int netconsole_netdev_event(struct notifier_block *this,
 				 * rtnl_lock already held
 				 */
 				if (nt->np.dev) {
+					spin_unlock_irqrestore(
+							&target_list_lock,
+							flags);
 					__netpoll_cleanup(&nt->np);
+					spin_lock_irqsave(&target_list_lock,
+							flags);
 					dev_put(nt->np.dev);
 					nt->np.dev = NULL;
+					netconsole_target_put(nt);
+					goto restart;
 				}
 				/* Fall through */
 			case NETDEV_GOING_DOWN:
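The netconsole hunks drop target_list_lock around __netpoll_cleanup(), which may sleep, then re-take the lock and jump back to the new restart label because the list may have changed while it was unlocked. A hedged userspace analogue of that unlock/relock/restart pattern, using pthreads and an invented singly linked list rather than the kernel list and netpoll APIs:

#include <pthread.h>
#include <stdio.h>

struct target {
	int needs_cleanup;
	struct target *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct target *targets;

/* Stand-in for a call that must not run while the lock is held. */
static void slow_cleanup(struct target *t)
{
	t->needs_cleanup = 0;
}

static void cleanup_all(void)
{
	struct target *t;

	pthread_mutex_lock(&list_lock);
restart:
	for (t = targets; t; t = t->next) {
		if (!t->needs_cleanup)
			continue;
		/* Drop the lock around the blocking call ... */
		pthread_mutex_unlock(&list_lock);
		slow_cleanup(t);
		pthread_mutex_lock(&list_lock);
		/* ... and restart: the list may have changed meanwhile. */
		goto restart;
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct target a = { 1, NULL }, b = { 1, &a };

	targets = &b;
	cleanup_all();
	printf("%d %d\n", a.needs_cleanup, b.needs_cleanup);	/* 0 0 */
	return 0;
}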
@@ -183,6 +183,19 @@ static const struct {
 };
 #undef _R
 
+static const struct rtl_firmware_info {
+	int mac_version;
+	const char *fw_name;
+} rtl_firmware_infos[] = {
+	{ .mac_version = RTL_GIGA_MAC_VER_25, .fw_name = FIRMWARE_8168D_1 },
+	{ .mac_version = RTL_GIGA_MAC_VER_26, .fw_name = FIRMWARE_8168D_2 },
+	{ .mac_version = RTL_GIGA_MAC_VER_29, .fw_name = FIRMWARE_8105E_1 },
+	{ .mac_version = RTL_GIGA_MAC_VER_30, .fw_name = FIRMWARE_8105E_1 },
+	{ .mac_version = RTL_GIGA_MAC_VER_30, .fw_name = FIRMWARE_8105E_1 },
+	{ .mac_version = RTL_GIGA_MAC_VER_31, .fw_name = FIRMWARE_8168E_1 },
+	{ .mac_version = RTL_GIGA_MAC_VER_32, .fw_name = FIRMWARE_8168E_2 }
+};
+
 enum cfg_version {
 	RTL_CFG_0 = 0x00,
 	RTL_CFG_1,
@@ -632,6 +645,7 @@ struct rtl8169_private {
 	u32 saved_wolopts;
 
 	const struct firmware *fw;
+#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN);
 };
 
 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -1847,25 +1861,26 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
 
 static void rtl_release_firmware(struct rtl8169_private *tp)
 {
-	release_firmware(tp->fw);
-	tp->fw = NULL;
+	if (!IS_ERR_OR_NULL(tp->fw))
+		release_firmware(tp->fw);
+	tp->fw = RTL_FIRMWARE_UNKNOWN;
 }
 
-static int rtl_apply_firmware(struct rtl8169_private *tp, const char *fw_name)
+static void rtl_apply_firmware(struct rtl8169_private *tp)
 {
-	const struct firmware **fw = &tp->fw;
-	int rc = !*fw;
-
-	if (rc) {
-		rc = request_firmware(fw, fw_name, &tp->pci_dev->dev);
-		if (rc < 0)
-			goto out;
-	}
+	const struct firmware *fw = tp->fw;
 
 	/* TODO: release firmware once rtl_phy_write_fw signals failures. */
-	rtl_phy_write_fw(tp, *fw);
-out:
-	return rc;
+	if (!IS_ERR_OR_NULL(fw))
+		rtl_phy_write_fw(tp, fw);
+}
+
+static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
+{
+	if (rtl_readphy(tp, reg) != val)
+		netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
+	else
+		rtl_apply_firmware(tp);
 }
 
 static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
@@ -2304,10 +2319,8 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
 
 	rtl_writephy(tp, 0x1f, 0x0005);
 	rtl_writephy(tp, 0x05, 0x001b);
-	if ((rtl_readphy(tp, 0x06) != 0xbf00) ||
-	    (rtl_apply_firmware(tp, FIRMWARE_8168D_1) < 0)) {
-		netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
-	}
+
+	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);
 
 	rtl_writephy(tp, 0x1f, 0x0000);
 }
@@ -2409,10 +2422,8 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
 
 	rtl_writephy(tp, 0x1f, 0x0005);
 	rtl_writephy(tp, 0x05, 0x001b);
-	if ((rtl_readphy(tp, 0x06) != 0xb300) ||
-	    (rtl_apply_firmware(tp, FIRMWARE_8168D_2) < 0)) {
-		netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
-	}
+
+	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);
 
 	rtl_writephy(tp, 0x1f, 0x0000);
 }
@@ -2567,16 +2578,14 @@ static void rtl8168e_hw_phy_config(struct rtl8169_private *tp)
 
 static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
 {
-	if (rtl_apply_firmware(tp, FIRMWARE_8168E_1) < 0)
-		netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
+	rtl_apply_firmware(tp);
 
 	rtl8168e_hw_phy_config(tp);
 }
 
 static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
 {
-	if (rtl_apply_firmware(tp, FIRMWARE_8168E_2) < 0)
-		netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
+	rtl_apply_firmware(tp);
 
 	rtl8168e_hw_phy_config(tp);
 }
@@ -2619,8 +2628,7 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
 	rtl_writephy(tp, 0x18, 0x0310);
 	msleep(100);
 
-	if (rtl_apply_firmware(tp, FIRMWARE_8105E_1) < 0)
-		netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
+	rtl_apply_firmware(tp);
 
 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 }
@@ -3463,6 +3471,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	tp->timer.data = (unsigned long) dev;
 	tp->timer.function = rtl8169_phy_timer;
 
+	tp->fw = RTL_FIRMWARE_UNKNOWN;
+
 	rc = register_netdev(dev);
 	if (rc < 0)
 		goto err_out_msi_4;
@@ -3515,10 +3525,10 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
 
 	cancel_delayed_work_sync(&tp->task);
 
-	rtl_release_firmware(tp);
-
 	unregister_netdev(dev);
 
+	rtl_release_firmware(tp);
+
 	if (pci_dev_run_wake(pdev))
 		pm_runtime_get_noresume(&pdev->dev);
 
@@ -3530,6 +3540,37 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
 	pci_set_drvdata(pdev, NULL);
 }
 
+static void rtl_request_firmware(struct rtl8169_private *tp)
+{
+	int i;
+
+	/* Return early if the firmware is already loaded / cached. */
+	if (!IS_ERR(tp->fw))
+		goto out;
+
+	for (i = 0; i < ARRAY_SIZE(rtl_firmware_infos); i++) {
+		const struct rtl_firmware_info *info = rtl_firmware_infos + i;
+
+		if (info->mac_version == tp->mac_version) {
+			const char *name = info->fw_name;
+			int rc;
+
+			rc = request_firmware(&tp->fw, name, &tp->pci_dev->dev);
+			if (rc < 0) {
+				netif_warn(tp, ifup, tp->dev, "unable to load "
+					"firmware patch %s (%d)\n", name, rc);
+				goto out_disable_request_firmware;
+			}
+			goto out;
+		}
+	}
+
+out_disable_request_firmware:
+	tp->fw = NULL;
+out:
+	return;
+}
+
 static int rtl8169_open(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
@@ -3561,11 +3602,13 @@ static int rtl8169_open(struct net_device *dev)
 
 	smp_mb();
 
+	rtl_request_firmware(tp);
+
 	retval = request_irq(dev->irq, rtl8169_interrupt,
 			     (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
 			     dev->name, dev);
 	if (retval < 0)
-		goto err_release_ring_2;
+		goto err_release_fw_2;
 
 	napi_enable(&tp->napi);
 
@@ -3586,7 +3629,8 @@ static int rtl8169_open(struct net_device *dev)
 out:
 	return retval;
 
-err_release_ring_2:
+err_release_fw_2:
+	rtl_release_firmware(tp);
 	rtl8169_rx_clear(tp);
 err_free_rx_1:
 	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
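Taken together, the r8169 hunks move firmware loading out of the PHY configuration helpers: rtl_request_firmware() runs once per open(), looks the chip up in rtl_firmware_infos[] and caches the result in tp->fw, where the RTL_FIRMWARE_UNKNOWN sentinel distinguishes "never requested" from "requested and unavailable" (NULL), so a failed load warns once and is not retried. A hedged userspace sketch of that three-state cache follows; the sentinel object stands in for the kernel's ERR_PTR(-EAGAIN) and the firmware name is purely illustrative.

#include <stdio.h>
#include <stddef.h>

struct firmware { size_t size; const unsigned char *data; };

/* Sentinel object meaning "never requested yet"; the driver uses
 * ERR_PTR(-EAGAIN) for the same purpose.  NULL means "requested and
 * not available", so a failed load is never retried. */
static struct firmware fw_unknown;
#define FIRMWARE_UNKNOWN (&fw_unknown)

static struct firmware *cached_fw = FIRMWARE_UNKNOWN;

/* Stand-in for request_firmware(); returns NULL on failure. */
static struct firmware *load_blob(const char *name)
{
	(void)name;
	return NULL;			/* pretend the file is missing */
}

static void request_fw_once(const char *name)
{
	if (cached_fw != FIRMWARE_UNKNOWN)
		return;			/* already resolved, either way */
	cached_fw = load_blob(name);
	if (!cached_fw)
		fprintf(stderr, "unable to load firmware patch %s\n", name);
}

static void apply_fw(void)
{
	if (cached_fw == FIRMWARE_UNKNOWN || !cached_fw)
		return;			/* nothing usable to apply */
	/* ... feed cached_fw->data to the device here ... */
}

int main(void)
{
	request_fw_once("example.fw");	/* warns once */
	request_fw_once("example.fw");	/* cached failure: silent */
	apply_fw();
	return 0;
}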
@@ -54,13 +54,13 @@
 #include <linux/usb/usbnet.h>
 #include <linux/usb/cdc.h>
 
-#define DRIVER_VERSION "7-Feb-2011"
+#define DRIVER_VERSION "23-Apr-2011"
 
 /* CDC NCM subclass 3.2.1 */
 #define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
 
 /* Maximum NTB length */
-#define CDC_NCM_NTB_MAX_SIZE_TX 16384 /* bytes */
+#define CDC_NCM_NTB_MAX_SIZE_TX (16384 + 4) /* bytes, must be short terminated */
 #define CDC_NCM_NTB_MAX_SIZE_RX 16384 /* bytes */
 
 /* Minimum value for MaxDatagramSize, ch. 6.2.9 */
@@ -503,7 +503,7 @@ bool ath_stoprecv(struct ath_softc *sc)
 			"confusing the DMA engine when we start RX up\n");
 		ATH_DBG_WARN_ON_ONCE(!stopped);
 	}
-	return stopped || reset;
+	return stopped && !reset;
 }
 
 void ath_flushrecv(struct ath_softc *sc)
@@ -1127,12 +1127,16 @@ int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 	     q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
 		tx_info = &txq->txb[txq->q.read_ptr];
-		iwl4965_tx_status(priv, tx_info,
-				  txq_id >= IWL4965_FIRST_AMPDU_QUEUE);
+
+		if (WARN_ON_ONCE(tx_info->skb == NULL))
+			continue;
 
 		hdr = (struct ieee80211_hdr *)tx_info->skb->data;
-		if (hdr && ieee80211_is_data_qos(hdr->frame_control))
+		if (ieee80211_is_data_qos(hdr->frame_control))
 			nfreed++;
 
+		iwl4965_tx_status(priv, tx_info,
+				  txq_id >= IWL4965_FIRST_AMPDU_QUEUE);
 		tx_info->skb = NULL;
 
 		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
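This hunk and the matching iwlagn one further down fix a use-after-free: the tx_status call hands the skb back to mac80211, so everything the driver still needs (here, the QoS bit in the 802.11 header) must be read before that call, and a NULL skb is now skipped with a warning. A small hedged sketch of the "extract first, release last" ordering:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { unsigned char *data; };

/* Stand-in for the completion path that consumes and frees the buffer. */
static void complete_and_free(struct buf *b)
{
	free(b->data);
	b->data = NULL;
}

static void reclaim(struct buf *b, int *qos_frames)
{
	if (!b->data) {			/* nothing queued in this slot */
		fprintf(stderr, "warn: empty slot\n");
		return;
	}

	/* Read everything we still need from the buffer first ... */
	if (b->data[0] & 0x80)
		(*qos_frames)++;

	/* ... and only then hand it to the code that frees it. */
	complete_and_free(b);
}

int main(void)
{
	struct buf b = { .data = malloc(4) };
	int qos = 0;

	memset(b.data, 0x80, 4);
	reclaim(&b, &qos);
	reclaim(&b, &qos);		/* second call hits the NULL guard */
	printf("qos frames: %d\n", qos);
	return 0;
}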
@@ -336,7 +336,6 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
 	struct ieee80211_channel *channel = conf->channel;
 	const struct iwl_channel_info *ch_info;
 	int ret = 0;
-	bool ht_changed[NUM_IWL_RXON_CTX] = {};
 
 	IWL_DEBUG_MAC80211(priv, "changed %#x", changed);
 
@@ -384,10 +383,8 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
 
 	for_each_context(priv, ctx) {
 		/* Configure HT40 channels */
-		if (ctx->ht.enabled != conf_is_ht(conf)) {
+		if (ctx->ht.enabled != conf_is_ht(conf))
 			ctx->ht.enabled = conf_is_ht(conf);
-			ht_changed[ctx->ctxid] = true;
-		}
 
 		if (ctx->ht.enabled) {
 			if (conf_is_ht40_minus(conf)) {
@@ -456,8 +453,6 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
 		if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
 			continue;
 		iwlagn_commit_rxon(priv, ctx);
-		if (ht_changed[ctx->ctxid])
-			iwlagn_update_qos(priv, ctx);
 	}
 out:
 	mutex_unlock(&priv->mutex);
@@ -1236,12 +1236,16 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
 		tx_info = &txq->txb[txq->q.read_ptr];
-		iwlagn_tx_status(priv, tx_info,
-				 txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
+
+		if (WARN_ON_ONCE(tx_info->skb == NULL))
+			continue;
 
 		hdr = (struct ieee80211_hdr *)tx_info->skb->data;
-		if (hdr && ieee80211_is_data_qos(hdr->frame_control))
+		if (ieee80211_is_data_qos(hdr->frame_control))
 			nfreed++;
 
+		iwlagn_tx_status(priv, tx_info,
+				 txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
 		tx_info->skb = NULL;
 
 		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
@@ -586,10 +586,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 	hci_req_cancel(hdev, ENODEV);
 	hci_req_lock(hdev);
 
-	/* Stop timer, it might be running */
-	del_timer_sync(&hdev->cmd_timer);
-
 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
+		del_timer_sync(&hdev->cmd_timer);
 		hci_req_unlock(hdev);
 		return 0;
 	}
@@ -628,6 +626,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 
 	/* Drop last sent command */
 	if (hdev->sent_cmd) {
+		del_timer_sync(&hdev->cmd_timer);
 		kfree_skb(hdev->sent_cmd);
 		hdev->sent_cmd = NULL;
 	}
@@ -2419,8 +2419,6 @@ static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
 	if (!conn)
 		goto unlock;
 
-	hci_conn_hold(conn);
-
 	conn->remote_cap = ev->capability;
 	conn->remote_oob = ev->oob_data;
 	conn->remote_auth = ev->authentication;
@@ -1079,6 +1079,7 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
 	tx_skb = skb_clone(skb, GFP_ATOMIC);
+	bt_cb(skb)->retries++;
 	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
 	control &= L2CAP_CTRL_SAR;
 
 	if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
 		control |= L2CAP_CTRL_FINAL;
@@ -369,6 +369,15 @@ static void __sco_sock_close(struct sock *sk)
 
 	case BT_CONNECTED:
 	case BT_CONFIG:
+		if (sco_pi(sk)->conn) {
+			sk->sk_state = BT_DISCONN;
+			sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
+			hci_conn_put(sco_pi(sk)->conn->hcon);
+			sco_pi(sk)->conn = NULL;
+		} else
+			sco_chan_del(sk, ECONNRESET);
+		break;
+
 	case BT_CONNECT:
 	case BT_DISCONN:
 		sco_chan_del(sk, ECONNRESET);
@@ -165,7 +165,7 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
 			goto drop;
 
 		/* If STP is turned off, then forward */
-		if (p->br->stp_enabled == BR_NO_STP)
+		if (p->br->stp_enabled == BR_NO_STP && dest[5] == 0)
 			goto forward;
 
 		if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
@@ -1427,9 +1427,14 @@ static int bcm_init(struct sock *sk)
 static int bcm_release(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
-	struct bcm_sock *bo = bcm_sk(sk);
+	struct bcm_sock *bo;
 	struct bcm_op *op, *next;
 
+	if (sk == NULL)
+		return 0;
+
+	bo = bcm_sk(sk);
+
 	/* remove bcm_ops, timer, rx_unregister(), etc. */
 
 	unregister_netdevice_notifier(&bo->notifier);
@@ -305,7 +305,12 @@ static int raw_init(struct sock *sk)
 static int raw_release(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
-	struct raw_sock *ro = raw_sk(sk);
+	struct raw_sock *ro;
+
+	if (!sk)
+		return 0;
+
+	ro = raw_sk(sk);
 
 	unregister_netdevice_notifier(&ro->notifier);
 
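Both CAN hunks (bcm_release() and raw_release()) delay the protocol-private lookup until after a NULL check on sock->sk, since release() can run against a socket whose sk is already gone. A hedged sketch of that shape with invented types:

#include <stdio.h>
#include <stddef.h>

struct sock   { int bound; };
struct socket { struct sock *sk; };

/* Deriving private state from sk before checking it dereferences NULL
 * when release() runs on a socket that has already been torn down. */
static int sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;	/* nothing left to clean up */

	/* only now is it safe to look at sk-derived state */
	sk->bound = 0;
	return 0;
}

int main(void)
{
	struct sock s = { 1 };
	struct socket live = { &s };
	struct socket dead = { NULL };

	printf("%d %d\n", sock_release(&live), sock_release(&dead));
	return 0;
}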
@@ -2692,6 +2692,12 @@ static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
 }
 
+static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
+					  unsigned long old)
+{
+	return NULL;
+}
+
 static struct dst_ops ipv4_dst_blackhole_ops = {
 	.family = AF_INET,
 	.protocol = cpu_to_be16(ETH_P_IP),
@@ -2700,6 +2706,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
 	.default_mtu = ipv4_blackhole_default_mtu,
 	.default_advmss = ipv4_default_advmss,
 	.update_pmtu = ipv4_rt_blackhole_update_pmtu,
+	.cow_metrics = ipv4_rt_blackhole_cow_metrics,
 };
 
 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
@@ -153,6 +153,12 @@ static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
 }
 
+static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
+					 unsigned long old)
+{
+	return NULL;
+}
+
 static struct dst_ops ip6_dst_blackhole_ops = {
 	.family = AF_INET6,
 	.protocol = cpu_to_be16(ETH_P_IPV6),
@@ -161,6 +167,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
 	.default_mtu = ip6_blackhole_default_mtu,
 	.default_advmss = ip6_default_advmss,
 	.update_pmtu = ip6_rt_blackhole_update_pmtu,
+	.cow_metrics = ip6_rt_blackhole_cow_metrics,
 };
 
 static const u32 ip6_template_metrics[RTAX_MAX] = {
@@ -2022,7 +2029,6 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 	rt->dst.output = ip6_output;
 	rt->rt6i_dev = net->loopback_dev;
 	rt->rt6i_idev = idev;
-	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, -1);
 	rt->dst.obsolete = -1;
 
 	rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
@@ -1335,7 +1335,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
 	skb->ip_summed = CHECKSUM_NONE;
 
 	/* Check if there is enough headroom to insert fragment header. */
-	if ((skb_headroom(skb) < frag_hdr_sz) &&
+	if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) &&
 	    pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC))
 		goto out;
 
@@ -1526,6 +1526,8 @@ int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
 	enum ieee80211_smps_mode old_req;
 	int err;
 
+	lockdep_assert_held(&sdata->u.mgd.mtx);
+
 	old_req = sdata->u.mgd.req_smps;
 	sdata->u.mgd.req_smps = smps_mode;
 
@@ -177,9 +177,9 @@ static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata,
 	if (sdata->vif.type != NL80211_IFTYPE_STATION)
 		return -EOPNOTSUPP;
 
-	mutex_lock(&local->iflist_mtx);
+	mutex_lock(&sdata->u.mgd.mtx);
 	err = __ieee80211_request_smps(sdata, smps_mode);
-	mutex_unlock(&local->iflist_mtx);
+	mutex_unlock(&sdata->u.mgd.mtx);
 
 	return err;
 }
@@ -343,6 +343,10 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct ipmac data;
 
+	/* MAC can be src only */
+	if (!(flags & IPSET_DIM_TWO_SRC))
+		return 0;
+
 	data.id = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
 	if (data.id < map->first_ip || data.id > map->last_ip)
 		return -IPSET_ERR_BITMAP_RANGE;
@@ -1022,8 +1022,9 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
 	if (cb->args[1] >= ip_set_max)
 		goto out;
 
-	pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
 	max = cb->args[0] == DUMP_ONE ? cb->args[1] + 1 : ip_set_max;
+dump_last:
+	pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
 	for (; cb->args[1] < max; cb->args[1]++) {
 		index = (ip_set_id_t) cb->args[1];
 		set = ip_set_list[index];
@@ -1038,8 +1039,8 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
 		 * so that lists (unions of sets) are dumped last.
 		 */
 		if (cb->args[0] != DUMP_ONE &&
-		    !((cb->args[0] == DUMP_ALL) ^
-		      (set->type->features & IPSET_DUMP_LAST)))
+		    ((cb->args[0] == DUMP_ALL) ==
+		     !!(set->type->features & IPSET_DUMP_LAST)))
 			continue;
 		pr_debug("List set: %s\n", set->name);
 		if (!cb->args[2]) {
@@ -1083,6 +1084,12 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
 			goto release_refcount;
 		}
 	}
+	/* If we dump all sets, continue with dumping last ones */
+	if (cb->args[0] == DUMP_ALL) {
+		cb->args[0] = DUMP_LAST;
+		cb->args[1] = 0;
+		goto dump_last;
+	}
 	goto out;
 
 nla_put_failure:
@@ -1093,11 +1100,6 @@ release_refcount:
 		pr_debug("release set %s\n", ip_set_list[index]->name);
 		ip_set_put_byindex(index);
 	}
-
-	/* If we dump all sets, continue with dumping last ones */
-	if (cb->args[0] == DUMP_ALL && cb->args[1] >= max && !cb->args[2])
-		cb->args[0] = DUMP_LAST;
-
 out:
 	if (nlh) {
 		nlmsg_end(skb, nlh);
@@ -81,6 +81,7 @@ set_match_v0_checkentry(const struct xt_mtchk_param *par)
 	if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) {
 		pr_warning("Protocol error: set match dimension "
 			   "is over the limit!\n");
+		ip_set_nfnl_put(info->match_set.index);
 		return -ERANGE;
 	}
 
@@ -135,6 +136,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
 		if (index == IPSET_INVALID_ID) {
 			pr_warning("Cannot find del_set index %u as target\n",
 				   info->del_set.index);
+			if (info->add_set.index != IPSET_INVALID_ID)
+				ip_set_nfnl_put(info->add_set.index);
 			return -ENOENT;
 		}
 	}
@@ -142,6 +145,10 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
 	    info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) {
 		pr_warning("Protocol error: SET target dimension "
 			   "is over the limit!\n");
+		if (info->add_set.index != IPSET_INVALID_ID)
+			ip_set_nfnl_put(info->add_set.index);
+		if (info->del_set.index != IPSET_INVALID_ID)
+			ip_set_nfnl_put(info->del_set.index);
 		return -ERANGE;
 	}
 
@@ -192,6 +199,7 @@ set_match_checkentry(const struct xt_mtchk_param *par)
 	if (info->match_set.dim > IPSET_DIM_MAX) {
 		pr_warning("Protocol error: set match dimension "
 			   "is over the limit!\n");
+		ip_set_nfnl_put(info->match_set.index);
 		return -ERANGE;
 	}
 
@@ -219,7 +227,7 @@ set_target(struct sk_buff *skb, const struct xt_action_param *par)
 	if (info->del_set.index != IPSET_INVALID_ID)
 		ip_set_del(info->del_set.index,
 			   skb, par->family,
-			   info->add_set.dim,
+			   info->del_set.dim,
 			   info->del_set.flags);
 
 	return XT_CONTINUE;
@@ -245,13 +253,19 @@ set_target_checkentry(const struct xt_tgchk_param *par)
 		if (index == IPSET_INVALID_ID) {
 			pr_warning("Cannot find del_set index %u as target\n",
 				   info->del_set.index);
+			if (info->add_set.index != IPSET_INVALID_ID)
+				ip_set_nfnl_put(info->add_set.index);
 			return -ENOENT;
 		}
 	}
 	if (info->add_set.dim > IPSET_DIM_MAX ||
-	    info->del_set.flags > IPSET_DIM_MAX) {
+	    info->del_set.dim > IPSET_DIM_MAX) {
 		pr_warning("Protocol error: SET target dimension "
 			   "is over the limit!\n");
+		if (info->add_set.index != IPSET_INVALID_ID)
+			ip_set_nfnl_put(info->add_set.index);
+		if (info->del_set.index != IPSET_INVALID_ID)
+			ip_set_nfnl_put(info->del_set.index);
 		return -ERANGE;
 	}
 
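All of the xt_set checkentry hunks fix error paths: once a set reference has been taken, every later failure has to drop it again with ip_set_nfnl_put() before returning, and the dimension check now tests del_set.dim rather than del_set.flags. A hedged userspace sketch of the underlying "undo what you already grabbed" rule, with invented reference-counting helpers:

#include <stdio.h>

#define NSETS 4
static int refcount[NSETS];		/* per-"set" reference counters */

static int get_ref(int idx)
{
	if (idx < 0 || idx >= NSETS)
		return -1;		/* unknown set */
	refcount[idx]++;
	return 0;
}

static void put_ref(int idx) { refcount[idx]--; }

/* Take references on an add-set and a del-set, validating as we go.
 * Every failure after the first successful get_ref() must release what
 * has already been taken, otherwise the reference leaks. */
static int checkentry(int add_idx, int del_idx, int dim, int max_dim)
{
	if (get_ref(add_idx) < 0)
		return -1;

	if (get_ref(del_idx) < 0) {
		put_ref(add_idx);
		return -1;
	}

	if (dim > max_dim) {
		put_ref(add_idx);	/* undo both references, not just one */
		put_ref(del_idx);
		return -1;
	}
	return 0;
}

int main(void)
{
	checkentry(0, 1, 9, 6);		/* fails the dimension check */
	printf("leaked refs: %d %d\n", refcount[0], refcount[1]);	/* 0 0 */
	return 0;
}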