Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
 "Several small fixes here:

   1) Don't crash in tg3 driver when the number of tx queues has been
      configured to be different from the number of rx queues.  From
      Thadeu Lima de Souza Cascardo.

   2) VLAN filter not disabled properly in promisc mode in ixgbe driver,
      from Vlad Yasevich.

   3) Fix OOPS on dellink op in VTI tunnel driver, from Xin Long.

   4) IPV6 GRE driver WCCP code sets skb->protocol to ETH_P_IP
      instead of ETH_P_IPV6, whoops.  From Yuri Chislov.

   5) Socket matching in ping driver is buggy when packet AF does not
      match socket's AF.  Fix from Jane Zhou.

   6) Fix checksum calculation errors in VXLAN due to where the
      udp_tunnel6_xmit_skb() helper gets its saddr/daddr from.  From
      Alexander Duyck.

   7) Fix 5G detection problem in rtlwifi driver, from Larry Finger.

   8) Fix NULL deref in tcp_v{4,6}_send_reset, from Eric Dumazet.

   9) Various missing netlink attribute verifications in bridging code,
      from Thomas Graf.

  10) tcp_recvmsg() unconditionally calls ipv4 ip_recv_error even for
      ipv6 sockets, whoops.  Fix from Willem de Bruijn"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (29 commits)
  net-timestamp: make tcp_recvmsg call ipv6_recv_error for AF_INET6 socks
  bridge: Sanitize IFLA_EXT_MASK for AF_BRIDGE:RTM_GETLINK
  bridge: Add missing policy entry for IFLA_BRPORT_FAST_LEAVE
  net: Check for presence of IFLA_AF_SPEC
  net: Validate IFLA_BRIDGE_MODE attribute length
  bridge: Validate IFLA_BRIDGE_FLAGS attribute length
  stmmac: platform: fix default values of the filter bins setting
  net/mlx4_core: Limit count field to 24 bits in qp_alloc_res
  net: dsa: bcm_sf2: reset switch prior to initialization
  net: dsa: bcm_sf2: fix unmapping registers in case of errors
  tg3: fix ring init when there are more TX than RX channels
  tcp: fix possible NULL dereference in tcp_vX_send_reset()
  rtlwifi: Change order in device startup
  rtlwifi: rtl8821ae: Fix 5G detection problem
  Revert "netfilter: conntrack: fix race in __nf_conntrack_confirm against get_next_corpse"
  vxlan: Fix boolean flip in VXLAN_F_UDP_ZERO_CSUM6_[TX|RX]
  ip6_udp_tunnel: Fix checksum calculation
  net-timestamp: Fix a documentation typo
  net/ping: handle protocol mismatching scenario
  af_packet: fix sparse warning
  ...
Linus Torvalds 2014-11-27 18:05:05 -08:00
commit 8e8459719c
30 changed files with 184 additions and 101 deletions


@@ -136,7 +136,7 @@ SOF_TIMESTAMPING_OPT_ID:
 This option is implemented only for transmit timestamps. There, the
 timestamp is always looped along with a struct sock_extended_err.
-The option modifies field ee_info to pass an id that is unique
+The option modifies field ee_data to pass an id that is unique
 among all possibly concurrently outstanding timestamp requests for
 that socket. In practice, it is a monotonically increasing u32
 (that wraps).
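
For reference only (not part of this merge): a minimal user-space sketch of how an application could read that id back from the socket's error queue once SOF_TIMESTAMPING_OPT_ID is enabled. The function name and buffer sizes are invented, and it assumes an IPv4 socket, so the notification arrives as a SOL_IP/IP_RECVERR control message carrying a struct sock_extended_err.

    /* Sketch: pull one transmit-timestamp notification and print its id. */
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/errqueue.h>

    static int read_tx_timestamp_id(int fd)
    {
        char data[256], control[512];
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
        struct msghdr msg = {
            .msg_iov = &iov, .msg_iovlen = 1,
            .msg_control = control, .msg_controllen = sizeof(control),
        };
        struct cmsghdr *cmsg;

        if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
            return -errno;

        for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
            struct sock_extended_err serr;

            if (cmsg->cmsg_level != SOL_IP || cmsg->cmsg_type != IP_RECVERR)
                continue;
            memcpy(&serr, CMSG_DATA(cmsg), sizeof(serr));
            if (serr.ee_origin == SO_EE_ORIGIN_TIMESTAMPING)
                printf("timestamp id %u\n", serr.ee_data);
        }
        return 0;
    }

A real program would call something like this after poll() reports an error-queue event on the socket.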


@@ -1225,11 +1225,13 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
     card->config_regs = pci_iomap(dev, 0, CONFIG_RAM_SIZE);
     if (!card->config_regs) {
         dev_warn(&dev->dev, "Failed to ioremap config registers\n");
+        err = -ENOMEM;
         goto out_release_regions;
     }
     card->buffers = pci_iomap(dev, 1, DATA_RAM_SIZE);
     if (!card->buffers) {
         dev_warn(&dev->dev, "Failed to ioremap data buffers\n");
+        err = -ENOMEM;
         goto out_unmap_config;
     }


@@ -377,6 +377,29 @@ static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
     return IRQ_HANDLED;
 }
 
+static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
+{
+    unsigned int timeout = 1000;
+    u32 reg;
+
+    reg = core_readl(priv, CORE_WATCHDOG_CTRL);
+    reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
+    core_writel(priv, reg, CORE_WATCHDOG_CTRL);
+
+    do {
+        reg = core_readl(priv, CORE_WATCHDOG_CTRL);
+        if (!(reg & SOFTWARE_RESET))
+            break;
+
+        usleep_range(1000, 2000);
+    } while (timeout-- > 0);
+
+    if (timeout == 0)
+        return -ETIMEDOUT;
+
+    return 0;
+}
+
 static int bcm_sf2_sw_setup(struct dsa_switch *ds)
 {
     const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
@@ -404,11 +427,18 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
         *base = of_iomap(dn, i);
         if (*base == NULL) {
             pr_err("unable to find register: %s\n", reg_names[i]);
-            return -ENODEV;
+            ret = -ENOMEM;
+            goto out_unmap;
         }
         base++;
     }
 
+    ret = bcm_sf2_sw_rst(priv);
+    if (ret) {
+        pr_err("unable to software reset switch: %d\n", ret);
+        goto out_unmap;
+    }
+
     /* Disable all interrupts and request them */
     intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
     intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
@@ -484,6 +514,7 @@ out_free_irq0:
 out_unmap:
     base = &priv->core;
     for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
+        if (*base)
             iounmap(*base);
         base++;
     }
@@ -733,29 +764,6 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
     return 0;
 }
 
-static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
-{
-    unsigned int timeout = 1000;
-    u32 reg;
-
-    reg = core_readl(priv, CORE_WATCHDOG_CTRL);
-    reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
-    core_writel(priv, reg, CORE_WATCHDOG_CTRL);
-
-    do {
-        reg = core_readl(priv, CORE_WATCHDOG_CTRL);
-        if (!(reg & SOFTWARE_RESET))
-            break;
-
-        usleep_range(1000, 2000);
-    } while (timeout-- > 0);
-
-    if (timeout == 0)
-        return -ETIMEDOUT;
-
-    return 0;
-}
-
 static int bcm_sf2_sw_resume(struct dsa_switch *ds)
 {
     struct bcm_sf2_priv *priv = ds_to_priv(ds);
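
Editorial aside: the bcm_sf2_sw_rst() helper moved above is the common "kick a self-clearing reset bit, then poll it with a bounded retry budget" pattern. Below is a stand-alone sketch of that pattern in plain C, with made-up names and a fake register read standing in for core_readl(); it is not driver code.

    /* Generic bounded poll: wait for a self-clearing "reset" bit to drop. */
    #include <stdio.h>

    #define SOFTWARE_RESET 0x1u               /* illustrative bit position */

    static unsigned int fake_readl(void)      /* stands in for a register read */
    {
        static int calls;
        return ++calls < 3 ? SOFTWARE_RESET : 0;   /* clears after a few polls */
    }

    static int wait_for_reset_clear(unsigned int max_polls)
    {
        while (max_polls--) {
            if (!(fake_readl() & SOFTWARE_RESET))
                return 0;                     /* hardware finished the reset */
            /* a real driver would sleep here, e.g. usleep_range(1000, 2000) */
        }
        return -1;                            /* gave up: bounded, never spins forever */
    }

    int main(void)
    {
        printf("reset %s\n", wait_for_reset_clear(1000) ? "timed out" : "completed");
        return 0;
    }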


@@ -8563,7 +8563,8 @@ static int tg3_init_rings(struct tg3 *tp)
         if (tnapi->rx_rcb)
             memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
 
-        if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
+        if (tnapi->prodring.rx_std &&
+            tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
             tg3_free_rings(tp);
             return -ENOMEM;
         }


@@ -4309,11 +4309,16 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
         return -EOPNOTSUPP;
 
     br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+    if (!br_spec)
+        return -EINVAL;
 
     nla_for_each_nested(attr, br_spec, rem) {
         if (nla_type(attr) != IFLA_BRIDGE_MODE)
             continue;
 
+        if (nla_len(attr) < sizeof(mode))
+            return -EINVAL;
+
         mode = nla_get_u16(attr);
         if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
             return -EINVAL;


@@ -1012,6 +1012,7 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
     /* igb_get_stats64() might access the rings on this vector,
      * we must wait a grace period before freeing it.
      */
+    if (q_vector)
         kfree_rcu(q_vector, rcu);
 }
 
@@ -1792,8 +1793,10 @@ void igb_down(struct igb_adapter *adapter)
     adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
 
     for (i = 0; i < adapter->num_q_vectors; i++) {
-        napi_synchronize(&(adapter->q_vector[i]->napi));
-        napi_disable(&(adapter->q_vector[i]->napi));
+        if (adapter->q_vector[i]) {
+            napi_synchronize(&adapter->q_vector[i]->napi);
+            napi_disable(&adapter->q_vector[i]->napi);
+        }
     }
 
@@ -3717,6 +3720,7 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
     int i;
 
     for (i = 0; i < adapter->num_tx_queues; i++)
+        if (adapter->tx_ring[i])
             igb_free_tx_resources(adapter->tx_ring[i]);
 }
 
@@ -3782,6 +3786,7 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
     int i;
 
     for (i = 0; i < adapter->num_tx_queues; i++)
+        if (adapter->tx_ring[i])
             igb_clean_tx_ring(adapter->tx_ring[i]);
 }
 
@@ -3819,6 +3824,7 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
     int i;
 
     for (i = 0; i < adapter->num_rx_queues; i++)
+        if (adapter->rx_ring[i])
             igb_free_rx_resources(adapter->rx_ring[i]);
 }
 
@@ -3874,6 +3880,7 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
     int i;
 
     for (i = 0; i < adapter->num_rx_queues; i++)
+        if (adapter->rx_ring[i])
             igb_clean_rx_ring(adapter->rx_ring[i]);
 }
 
@@ -7404,6 +7411,8 @@ static int igb_resume(struct device *dev)
     pci_restore_state(pdev);
     pci_save_state(pdev);
 
+    if (!pci_device_is_present(pdev))
+        return -ENODEV;
+
     err = pci_enable_device_mem(pdev);
     if (err) {
         dev_err(&pdev->dev,


@@ -3936,8 +3936,8 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
         * if SR-IOV and VMDQ are disabled - otherwise ensure
         * that hardware VLAN filters remain enabled.
         */
-        if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
-                                IXGBE_FLAG_SRIOV_ENABLED)))
+        if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
+                              IXGBE_FLAG_SRIOV_ENABLED))
             vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
     } else {
         if (netdev->flags & IFF_ALLMULTI) {
@@ -7669,6 +7669,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
         return -EOPNOTSUPP;
 
     br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+    if (!br_spec)
+        return -EINVAL;
 
     nla_for_each_nested(attr, br_spec, rem) {
         __u16 mode;
@@ -7677,6 +7679,9 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
         if (nla_type(attr) != IFLA_BRIDGE_MODE)
             continue;
 
+        if (nla_len(attr) < sizeof(mode))
+            return -EINVAL;
+
         mode = nla_get_u16(attr);
         if (mode == BRIDGE_MODE_VEPA) {
             reg = 0;
@@ -7979,6 +7984,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
     int i, err, pci_using_dac, expected_gts;
     unsigned int indices = MAX_TX_QUEUES;
     u8 part_str[IXGBE_PBANUM_LENGTH];
+    bool disable_dev = false;
 #ifdef IXGBE_FCOE
     u16 device_caps;
 #endif
@@ -8369,13 +8375,14 @@ err_sw_init:
     iounmap(adapter->io_addr);
     kfree(adapter->mac_table);
 err_ioremap:
+    disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
     free_netdev(netdev);
 err_alloc_etherdev:
     pci_release_selected_regions(pdev,
                                  pci_select_bars(pdev, IORESOURCE_MEM));
 err_pci_reg:
 err_dma:
-    if (!adapter || !test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+    if (!adapter || disable_dev)
         pci_disable_device(pdev);
     return err;
 }
@@ -8393,6 +8400,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
 {
     struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
     struct net_device *netdev = adapter->netdev;
+    bool disable_dev;
 
     ixgbe_dbg_adapter_exit(adapter);
 
@@ -8442,11 +8450,12 @@ static void ixgbe_remove(struct pci_dev *pdev)
     e_dev_info("complete\n");
 
     kfree(adapter->mac_table);
+    disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
     free_netdev(netdev);
 
     pci_disable_pcie_error_reporting(pdev);
 
-    if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+    if (disable_dev)
         pci_disable_device(pdev);
 }
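
Editorial aside on the disable_dev change above: the flag is sampled with test_and_set_bit() before free_netdev() runs, and only the saved boolean is consulted afterwards — presumably so nothing dereferences adapter state after the netdev (and the private data embedded in it) has been freed. A tiny stand-alone sketch of sampling a flag before freeing the object that holds it (invented names, plain C, not driver code):

    /* Sketch: read state that lives inside an object before freeing that object. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct priv { unsigned long state; };     /* analogous to the adapter      */
    struct dev  { struct priv p; };           /* priv is embedded in the dev   */

    static void release(struct dev *d)
    {
        /* Wrong: free(d) first, then test d->p.state -> use after free.        */
        /* Right: sample and mark the flag, free, then act on the local copy.   */
        bool disable = (d->p.state & 1UL) == 0;
        d->p.state |= 1UL;                    /* mark "disabled" exactly once   */
        free(d);
        if (disable)
            printf("disabling device\n");
    }

    int main(void)
    {
        struct dev *d = calloc(1, sizeof(*d));
        if (!d)
            return 1;
        release(d);
        return 0;
    }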


@@ -1546,7 +1546,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
     switch (op) {
     case RES_OP_RESERVE:
-        count = get_param_l(&in_param);
+        count = get_param_l(&in_param) & 0xffffff;
         align = get_param_h(&in_param);
         err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
         if (err)
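
The & 0xffffff above simply clamps the caller-supplied value to the low 24 bits, matching the "Limit count field to 24 bits" entry in the shortlog. A trivial stand-alone illustration of the mask (plain C, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t in = 0x81000004;             /* high bits set by a misbehaving caller */
        uint32_t count = in & 0xffffff;       /* keep only the 24-bit count field      */

        printf("raw=0x%08x masked=0x%06x (max 0xffffff)\n", in, count);
        return 0;
    }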


@@ -177,12 +177,6 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
      */
     plat->maxmtu = JUMBO_LEN;
 
-    /* Set default value for multicast hash bins */
-    plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
-    /* Set default value for unicast filter entries */
-    plat->unicast_filter_entries = 1;
-
     /*
      * Currently only the properties needed on SPEAr600
      * are provided. All other properties should be added
@@ -270,6 +264,13 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
         return PTR_ERR(addr);
 
     plat_dat = dev_get_platdata(&pdev->dev);
+
+    /* Set default value for multicast hash bins */
+    plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+    /* Set default value for unicast filter entries */
+    plat_dat->unicast_filter_entries = 1;
+
     if (pdev->dev.of_node) {
         if (!plat_dat)
             plat_dat = devm_kzalloc(&pdev->dev,


@@ -2306,9 +2306,9 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
     if (ipv6) {
         udp_conf.family = AF_INET6;
         udp_conf.use_udp6_tx_checksums =
-            !!(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
+            !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
         udp_conf.use_udp6_rx_checksums =
-            !!(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
+            !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
     } else {
         udp_conf.family = AF_INET;
         udp_conf.local_ip.s_addr = INADDR_ANY;
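
The flipped operator is the whole fix: VXLAN_F_UDP_ZERO_CSUM6_* means "send/accept zero checksums", so the use_udp6_*_checksums knobs must be the logical negation of the flag test, not its truth value. A stand-alone illustration of !(x) versus !!(x), using an invented flag name:

    #include <stdbool.h>
    #include <stdio.h>

    #define F_ZERO_CSUM6_TX 0x1u   /* "do NOT checksum" request flag (illustrative) */

    int main(void)
    {
        unsigned int flags = F_ZERO_CSUM6_TX;

        bool wrong = !!(flags & F_ZERO_CSUM6_TX);  /* 1: still "use checksums"        */
        bool right = !(flags & F_ZERO_CSUM6_TX);   /* 0: checksums correctly disabled */

        printf("wrong=%d right=%d\n", wrong, right);
        return 0;
    }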


@@ -155,6 +155,7 @@ enum iwl_ucode_tlv_api {
  * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests
  * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
  *    which also implies support for the scheduler configuration command
+ * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
  */
 enum iwl_ucode_tlv_capa {
     IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
@@ -163,6 +164,7 @@ enum iwl_ucode_tlv_capa {
     IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10),
     IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11),
     IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12),
+    IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18),
 };
 
 /* The default calibrate table size if not specified by firmware file */


@@ -2448,10 +2448,16 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
     switch (vif->type) {
     case NL80211_IFTYPE_STATION:
+        if (mvm->fw->ucode_capa.capa[0] &
+            IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT) {
             /* Use aux roc framework (HS20) */
             ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
                                            vif, duration);
             goto out_unlock;
+        }
+        IWL_ERR(mvm, "hotspot not supported\n");
+        ret = -EINVAL;
+        goto out_unlock;
     case NL80211_IFTYPE_P2P_DEVICE:
         /* handle below */
         break;


@@ -2249,6 +2249,16 @@ int rtl_pci_probe(struct pci_dev *pdev,
     /*like read eeprom and so on */
     rtlpriv->cfg->ops->read_eeprom_info(hw);
 
+    if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
+        RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
+        err = -ENODEV;
+        goto fail3;
+    }
+    rtlpriv->cfg->ops->init_sw_leds(hw);
+
+    /*aspm */
+    rtl_pci_init_aspm(hw);
+
     /* Init mac80211 sw */
     err = rtl_init_core(hw);
     if (err) {
@@ -2264,16 +2274,6 @@ int rtl_pci_probe(struct pci_dev *pdev,
         goto fail3;
     }
 
-    if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
-        RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
-        err = -ENODEV;
-        goto fail3;
-    }
-    rtlpriv->cfg->ops->init_sw_leds(hw);
-
-    /*aspm */
-    rtl_pci_init_aspm(hw);
-
     err = ieee80211_register_hw(hw);
     if (err) {
         RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,


@@ -3672,8 +3672,9 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
         mac->opmode == NL80211_IFTYPE_ADHOC)
         macid = sta->aid + 1;
     if (wirelessmode == WIRELESS_MODE_N_5G ||
-        wirelessmode == WIRELESS_MODE_AC_5G)
-        ratr_bitmap = sta->supp_rates[NL80211_BAND_5GHZ];
+        wirelessmode == WIRELESS_MODE_AC_5G ||
+        wirelessmode == WIRELESS_MODE_A)
+        ratr_bitmap = sta->supp_rates[NL80211_BAND_5GHZ] << 4;
     else
         ratr_bitmap = sta->supp_rates[NL80211_BAND_2GHZ];


@@ -39,7 +39,7 @@ struct backend_info {
 static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
 static void connect(struct backend_info *be);
 static int read_xenbus_vif_flags(struct backend_info *be);
-static void backend_create_xenvif(struct backend_info *be);
+static int backend_create_xenvif(struct backend_info *be);
 static void unregister_hotplug_status_watch(struct backend_info *be);
 static void set_backend_state(struct backend_info *be,
                               enum xenbus_state state);
@@ -352,7 +352,9 @@ static int netback_probe(struct xenbus_device *dev,
     be->state = XenbusStateInitWait;
 
     /* This kicks hotplug scripts, so do it immediately. */
-    backend_create_xenvif(be);
+    err = backend_create_xenvif(be);
+    if (err)
+        goto fail;
 
     return 0;
@@ -397,19 +399,19 @@ static int netback_uevent(struct xenbus_device *xdev,
 }
 
-static void backend_create_xenvif(struct backend_info *be)
+static int backend_create_xenvif(struct backend_info *be)
 {
     int err;
     long handle;
     struct xenbus_device *dev = be->dev;
 
     if (be->vif != NULL)
-        return;
+        return 0;
 
     err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
     if (err != 1) {
         xenbus_dev_fatal(dev, err, "reading handle");
-        return;
+        return (err < 0) ? err : -EINVAL;
     }
 
     be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
@@ -417,10 +419,11 @@ static int backend_create_xenvif(struct backend_info *be)
         err = PTR_ERR(be->vif);
         be->vif = NULL;
         xenbus_dev_fatal(dev, err, "creating interface");
-        return;
+        return err;
     }
 
     kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
+    return 0;
 }
 
 static void backend_disconnect(struct backend_info *be)


@@ -37,6 +37,8 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int inet_ctl_sock_create(struct sock **sk, unsigned short family,
                          unsigned short type, unsigned char protocol,
                          struct net *net);
+int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
+                    int *addr_len);
 
 static inline void inet_ctl_sock_destroy(struct sock *sk)
 {


@@ -280,6 +280,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
     [IFLA_BRPORT_MODE]      = { .type = NLA_U8 },
     [IFLA_BRPORT_GUARD]     = { .type = NLA_U8 },
     [IFLA_BRPORT_PROTECT]   = { .type = NLA_U8 },
+    [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
     [IFLA_BRPORT_LEARNING]  = { .type = NLA_U8 },
     [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
 };


@@ -2685,13 +2685,20 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
     int idx = 0;
     u32 portid = NETLINK_CB(cb->skb).portid;
     u32 seq = cb->nlh->nlmsg_seq;
-    struct nlattr *extfilt;
     u32 filter_mask = 0;
 
+    if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
+        struct nlattr *extfilt;
+
         extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
                                   IFLA_EXT_MASK);
-        if (extfilt)
+        if (extfilt) {
+            if (nla_len(extfilt) < sizeof(filter_mask))
+                return -EINVAL;
+
             filter_mask = nla_get_u32(extfilt);
+        }
+    }
 
     rcu_read_lock();
     for_each_netdev_rcu(net, dev) {
@@ -2798,6 +2805,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
     if (br_spec) {
         nla_for_each_nested(attr, br_spec, rem) {
             if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
+                if (nla_len(attr) < sizeof(flags))
+                    return -EINVAL;
+
                 have_flags = true;
                 flags = nla_get_u16(attr);
                 break;
@@ -2868,6 +2878,9 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
     if (br_spec) {
         nla_for_each_nested(attr, br_spec, rem) {
             if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
+                if (nla_len(attr) < sizeof(flags))
+                    return -EINVAL;
+
                 have_flags = true;
                 flags = nla_get_u16(attr);
                 break;
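
The bridge/rtnetlink hunks above (and the be2net/ixgbe ones earlier) all add the same guard: check nla_len() against the size you are about to read before calling nla_get_u16()/nla_get_u32(), because the sender controls the attribute length. Below is a stand-alone model of that check over a simplified TLV layout (plain C; this is not the kernel's struct nlattr API):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified TLV: 2-byte length (header included), 2-byte type, then payload. */
    struct tlv_hdr { uint16_t len; uint16_t type; };

    /* Stores the u16 payload only if the attribute actually carries one. */
    static int tlv_get_u16(const unsigned char *buf, size_t buflen, uint16_t *out)
    {
        struct tlv_hdr h;

        if (buflen < sizeof(h))
            return -1;
        memcpy(&h, buf, sizeof(h));
        if (h.len > buflen || h.len < sizeof(h) + sizeof(*out))
            return -1;                 /* truncated payload: reject, never read it */
        memcpy(out, buf + sizeof(h), sizeof(*out));
        return 0;
    }

    int main(void)
    {
        unsigned char short_attr[4] = { 4, 0, 1, 0 };  /* header only, no payload */
        uint16_t v;

        printf("%s\n", tlv_get_u16(short_attr, sizeof(short_attr), &v) ?
               "rejected truncated attribute" : "read value");
        return 0;
    }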


@@ -1386,6 +1386,17 @@ out:
     return pp;
 }
 
+int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+{
+    if (sk->sk_family == AF_INET)
+        return ip_recv_error(sk, msg, len, addr_len);
+#if IS_ENABLED(CONFIG_IPV6)
+    if (sk->sk_family == AF_INET6)
+        return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
+#endif
+    return -EINVAL;
+}
+
 static int inet_gro_complete(struct sk_buff *skb, int nhoff)
 {
     __be16 newlen = htons(skb->len - nhoff);


@@ -528,6 +528,7 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = {
     .validate   = vti_tunnel_validate,
     .newlink    = vti_newlink,
     .changelink = vti_changelink,
+    .dellink    = ip_tunnel_dellink,
     .get_size   = vti_get_size,
     .fill_info  = vti_fill_info,
 };


@@ -217,6 +217,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
                          &ipv6_hdr(skb)->daddr))
                 continue;
 #endif
+        } else {
+            continue;
         }
 
         if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
@@ -853,16 +855,8 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
     if (flags & MSG_OOB)
         goto out;
 
-    if (flags & MSG_ERRQUEUE) {
-        if (family == AF_INET) {
-            return ip_recv_error(sk, msg, len, addr_len);
-#if IS_ENABLED(CONFIG_IPV6)
-        } else if (family == AF_INET6) {
-            return pingv6_ops.ipv6_recv_error(sk, msg, len,
-                                              addr_len);
-#endif
-        }
-    }
+    if (flags & MSG_ERRQUEUE)
+        return inet_recv_error(sk, msg, len, addr_len);
 
     skb = skb_recv_datagram(sk, flags, noblock, &err);
     if (!skb)


@@ -1598,7 +1598,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
     u32 urg_hole = 0;
 
     if (unlikely(flags & MSG_ERRQUEUE))
-        return ip_recv_error(sk, msg, len, addr_len);
+        return inet_recv_error(sk, msg, len, addr_len);
 
     if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
         (sk->sk_state == TCP_ESTABLISHED))


@@ -598,7 +598,10 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
     if (th->rst)
         return;
 
-    if (skb_rtable(skb)->rt_type != RTN_LOCAL)
+    /* If sk not NULL, it means we did a successful lookup and incoming
+     * route had to be correct. prequeue might have dropped our dst.
+     */
+    if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
         return;
 
     /* Swap the send and the receive. */


@@ -502,11 +502,11 @@ static int ip6gre_rcv(struct sk_buff *skb)
         skb->protocol = gre_proto;
 
         /* WCCP version 1 and 2 protocol decoding.
-         * - Change protocol to IP
+         * - Change protocol to IPv6
          * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
          */
         if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
-            skb->protocol = htons(ETH_P_IP);
+            skb->protocol = htons(ETH_P_IPV6);
             if ((*(h + offset) & 0xF0) != 0x40)
                 offset += 4;
         }


@@ -69,7 +69,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
     int nhoff;
 
     if (unlikely(skb_shinfo(skb)->gso_type &
-                 ~(SKB_GSO_UDP |
+                 ~(SKB_GSO_TCPV4 |
+                   SKB_GSO_UDP |
                    SKB_GSO_DODGY |
                    SKB_GSO_TCP_ECN |
                    SKB_GSO_GRE |


@@ -79,15 +79,13 @@ int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst,
     uh->source = src_port;
 
     uh->len = htons(skb->len);
-    uh->check = 0;
 
     memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
     IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
                         | IPSKB_REROUTED);
     skb_dst_set(skb, dst);
 
-    udp6_set_csum(udp_get_no_check6_tx(sk), skb, &inet6_sk(sk)->saddr,
-                  &sk->sk_v6_daddr, skb->len);
+    udp6_set_csum(udp_get_no_check6_tx(sk), skb, saddr, daddr, skb->len);
 
     __skb_push(skb, sizeof(*ip6h));
     skb_reset_network_header(skb);


@@ -905,6 +905,15 @@ static int vti6_newlink(struct net *src_net, struct net_device *dev,
     return vti6_tnl_create2(dev);
 }
 
+static void vti6_dellink(struct net_device *dev, struct list_head *head)
+{
+    struct net *net = dev_net(dev);
+    struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+    if (dev != ip6n->fb_tnl_dev)
+        unregister_netdevice_queue(dev, head);
+}
+
 static int vti6_changelink(struct net_device *dev, struct nlattr *tb[],
                            struct nlattr *data[])
 {
@@ -980,6 +989,7 @@ static struct rtnl_link_ops vti6_link_ops __read_mostly = {
     .setup      = vti6_dev_setup,
     .validate   = vti6_validate,
     .newlink    = vti6_newlink,
+    .dellink    = vti6_dellink,
     .changelink = vti6_changelink,
     .get_size   = vti6_get_size,
     .fill_info  = vti6_fill_info,
@@ -1020,6 +1030,7 @@ static int __net_init vti6_init_net(struct net *net)
     if (!ip6n->fb_tnl_dev)
         goto err_alloc_dev;
     dev_net_set(ip6n->fb_tnl_dev, net);
+    ip6n->fb_tnl_dev->rtnl_link_ops = &vti6_link_ops;
 
     err = vti6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
     if (err < 0)


@@ -903,7 +903,10 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
     if (th->rst)
         return;
 
-    if (!ipv6_unicast_destination(skb))
+    /* If sk not NULL, it means we did a successful lookup and incoming
+     * route had to be correct. prequeue might have dropped our dst.
+     */
+    if (!sk && !ipv6_unicast_destination(skb))
         return;
 
 #ifdef CONFIG_TCP_MD5SIG


@@ -611,16 +611,12 @@ __nf_conntrack_confirm(struct sk_buff *skb)
      */
     NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
     pr_debug("Confirming conntrack %p\n", ct);
-
-    /* We have to check the DYING flag after unlink to prevent
-     * a race against nf_ct_get_next_corpse() possibly called from
-     * user context, else we insert an already 'dead' hash, blocking
-     * further use of that particular connection -JM.
-     */
-    nf_ct_del_from_dying_or_unconfirmed_list(ct);
-
+    /* We have to check the DYING flag inside the lock to prevent
+       a race against nf_ct_get_next_corpse() possibly called from
+       user context, else we insert an already 'dead' hash, blocking
+       further use of that particular connection -JM */
+
     if (unlikely(nf_ct_is_dying(ct))) {
-        nf_ct_add_to_dying_list(ct);
         nf_conntrack_double_unlock(hash, reply_hash);
         local_bh_enable();
         return NF_ACCEPT;
@@ -640,6 +636,8 @@ __nf_conntrack_confirm(struct sk_buff *skb)
         zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
         goto out;
 
+    nf_ct_del_from_dying_or_unconfirmed_list(ct);
+
     /* Timer relative to confirmation time, not original
        setting time, otherwise we'd get timer wrap in
        weird delay cases. */


@@ -378,7 +378,7 @@ static void unregister_prot_hook(struct sock *sk, bool sync)
         __unregister_prot_hook(sk, sync);
 }
 
-static inline __pure struct page *pgv_to_page(void *addr)
+static inline struct page * __pure pgv_to_page(void *addr)
 {
     if (is_vmalloc_addr(addr))
         return vmalloc_to_page(addr);