diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5c1bc995e560..f10221f40803 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -123,7 +123,7 @@ static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
 		skb_frag_size_set(frag, size);
 		skb->data_len += size;
-		skb->truesize += size;
+		skb->truesize += PAGE_SIZE;
 	} else
 		skb_put(skb, length);
@@ -156,14 +156,18 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct sk_buff *skb;
 	int buf_size;
+	int tailroom;
 	u64 *mapping;
-	if (ipoib_ud_need_sg(priv->max_ib_mtu))
+	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
 		buf_size = IPOIB_UD_HEAD_SIZE;
-	else
+		tailroom = 128; /* reserve some tailroom for IP/TCP headers */
+	} else {
 		buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+		tailroom = 0;
+	}
-	skb = dev_alloc_skb(buf_size + 4);
+	skb = dev_alloc_skb(buf_size + tailroom + 4);
 	if (unlikely(!skb))
 		return NULL;
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 1a0ae4445ff2..5f21f629b7ae 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -135,8 +135,8 @@ send_layer2(struct mISDNstack *st, struct sk_buff *skb)
 			skb = NULL;
 		else if (*debug & DEBUG_SEND_ERR)
 			printk(KERN_DEBUG
-			       "%s ch%d mgr prim(%x) addr(%x) err %d\n",
-			       __func__, ch->nr, hh->prim, ch->addr, ret);
+			       "%s mgr prim(%x) err %d\n",
+			       __func__, hh->prim, ret);
 	}
 out:
 	mutex_unlock(&st->lmutex);
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 3680aa251dea..2cf084eb9d52 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -6,7 +6,7 @@
 #include "bonding.h"
 #include "bond_alb.h"
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS)
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index f5a40b925f5e..4ddcc3e41dab 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3230,6 +3230,12 @@ static int bond_master_netdev_event(unsigned long event,
 	switch (event) {
 	case NETDEV_CHANGENAME:
 		return bond_event_changename(event_bond);
+	case NETDEV_UNREGISTER:
+		bond_remove_proc_entry(event_bond);
+		break;
+	case NETDEV_REGISTER:
+		bond_create_proc_entry(event_bond);
+		break;
 	default:
 		break;
 	}
@@ -4414,8 +4420,6 @@ static void bond_uninit(struct net_device *bond_dev)
 	bond_work_cancel_all(bond);
-	bond_remove_proc_entry(bond);
-
 	bond_debug_unregister(bond);
 	__hw_addr_flush(&bond->mc_list);
@@ -4817,7 +4821,6 @@ static int bond_init(struct net_device *bond_dev)
 	bond_set_lockdep_class(bond_dev);
-	bond_create_proc_entry(bond);
 	list_add_tail(&bond->bond_list, &bn->dev_list);
 	bond_prepare_sysfs_group(bond);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 42c13d8280c6..36d3783ebfa0 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -261,7 +261,6 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
 	if ((phy_data & BMSR_LSTATUS) == 0) {
 		/* link down */
 		netif_carrier_off(netdev);
-		netif_stop_queue(netdev);
 		hw->hibernate = true;
 		if (atl1c_reset_mac(hw) != 0)
 			if (netif_msg_hw(adapter))
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 46b8b7d81633..d09c6b583d17 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -656,7 +656,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 			dma_unmap_single(bp->sdev->dma_dev, mapping,
 					 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
-		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
+		skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
 		if (skb == NULL)
 			return -ENOMEM;
 		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
@@ -967,7 +967,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		dma_unmap_single(bp->sdev->dma_dev, mapping, len,
 				 DMA_TO_DEVICE);
-		bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
+		bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb)
 			goto err_out;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 1901da153312..0ced154129a9 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -5377,7 +5377,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
 			int k, last;
 			if (skb == NULL) {
-				j++;
+				j = NEXT_TX_BD(j);
 				continue;
 			}
@@ -5389,8 +5389,8 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
 			tx_buf->skb = NULL;
 			last = tx_buf->nr_frags;
-			j++;
-			for (k = 0; k < last; k++, j++) {
+			j = NEXT_TX_BD(j);
+			for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
 				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
 				dma_unmap_page(&bp->pdev->dev,
 					dma_unmap_addr(tx_buf, mapping),
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 650c545705f3..3b4fc61f24cf 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -542,7 +542,8 @@ int cnic_unregister_driver(int ulp_type)
 	}
 	if (atomic_read(&ulp_ops->ref_count) != 0)
-		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
+		pr_warn("%s: Failed waiting for ref count to go to zero\n",
+			__func__);
 	return 0;
 out_unlock:
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index af16f9fbc353..4605f7246687 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2077,10 +2077,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			return NETDEV_TX_OK;
 		}
-		/* Steal sock reference for processing TX time stamps */
-		swap(skb_new->sk, skb->sk);
-		swap(skb_new->destructor, skb->destructor);
-		kfree_skb(skb);
+		if (skb->sk)
+			skb_set_owner_w(skb_new, skb->sk);
+		consume_skb(skb);
 		skb = skb_new;
 	}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 9313f5c84fad..59a3f141feb1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6640,6 +6640,11 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 		return -EINVAL;
 	}
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+		e_err(drv, "Enable failed, SR-IOV enabled\n");
+		return -EINVAL;
+	}
+
 	/* Hardware supports up to 8 traffic classes */
 	if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
 	    (hw->mac.type == ixgbe_mac_82598EB &&
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index fb8377da1687..4b785e10f2ed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -51,7 +51,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
 		priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum);
-
+		wmb();
 		entry = (++priv->cur_tx) % txsize;
 		desc = priv->dma_tx + entry;
@@ -59,6 +59,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 					    len, DMA_TO_DEVICE);
 		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
 		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum);
+		wmb();
 		priv->hw->desc->set_tx_owner(desc);
 		priv->tx_skbuff[entry] = NULL;
 	} else {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ea3bc0963bd7..f6b04c1a3672 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1334,6 +1334,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 			priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
 			wmb();
 			priv->hw->desc->set_tx_owner(desc);
+			wmb();
 		}
 		/* Interrupt on completition only for the latest segment */
@@ -1349,6 +1350,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* To avoid raise condition */
 	priv->hw->desc->set_tx_owner(first);
+	wmb();
 	priv->cur_tx++;
@@ -1412,6 +1414,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 		}
 		wmb();
 		priv->hw->desc->set_rx_owner(p + entry);
+		wmb();
 	}
 }
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 39ea0674dcde..5c120189ec86 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -46,7 +46,13 @@ static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum)
 	struct mdio_mux_parent_bus *pb = cb->parent;
 	int r;
-	mutex_lock(&pb->mii_bus->mdio_lock);
+	/* In theory multiple mdio_mux could be stacked, thus creating
+	 * more than a single level of nesting. But in practice,
+	 * SINGLE_DEPTH_NESTING will cover the vast majority of use
+	 * cases. We use it, instead of trying to handle the general
+	 * case.
+	 */
+	mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
 	r = pb->switch_fn(pb->current_child, cb->bus_number,
 			  pb->switch_data);
 	if (r)
 		goto out;
@@ -71,7 +77,7 @@ static int mdio_mux_write(struct mii_bus *bus, int phy_id,
 	int r;
-	mutex_lock(&pb->mii_bus->mdio_lock);
+	mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
 	r = pb->switch_fn(pb->current_child, cb->bus_number,
 			  pb->switch_data);
 	if (r)
 		goto out;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index b9cc5f703d7d..85c983d52527 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -374,6 +374,15 @@ static const struct driver_info qmi_wwan_force_int1 = {
 	.data = BIT(1), /* interface whitelist bitmap */
 };
+static const struct driver_info qmi_wwan_force_int2 = {
+	.description = "Qualcomm WWAN/QMI device",
+	.flags = FLAG_WWAN,
+	.bind = qmi_wwan_bind_shared,
+	.unbind = qmi_wwan_unbind,
+	.manage_power = qmi_wwan_manage_power,
+	.data = BIT(2), /* interface whitelist bitmap */
+};
+
 static const struct driver_info qmi_wwan_force_int3 = {
 	.description = "Qualcomm WWAN/QMI device",
 	.flags = FLAG_WWAN,
@@ -526,6 +535,15 @@ static const struct usb_device_id products[] = {
 		.bInterfaceProtocol = 0xff,
 		.driver_info = (unsigned long)&qmi_wwan_force_int4,
 	},
+	{	/* ZTE MF60 */
+		.match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
+		.idVendor = 0x19d2,
+		.idProduct = 0x1402,
+		.bInterfaceClass = 0xff,
+		.bInterfaceSubClass = 0xff,
+		.bInterfaceProtocol = 0xff,
+		.driver_info = (unsigned long)&qmi_wwan_force_int2,
+	},
 	{	/* Sierra Wireless MC77xx in QMI mode */
 		.match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
 		.idVendor = 0x1199,
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index ff50cb4290e4..2d3c6644f82d 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -1072,7 +1072,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
 	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
 	/* create a bounce buffer in zone_dma on mapping failure. */
 	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
-		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
+		bounce_skb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb) {
 			ring->current_slot = old_top_slot;
 			ring->used_slots = old_used_slots;
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index d24eaf89ffb5..34f61a0581a2 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -3405,7 +3405,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
 		return 0;
 	}
-	if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+	if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
 		IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
 			key_flags);
 		spin_unlock_irqrestore(&il->sta_lock, flags);
@@ -3420,7 +3420,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
 	memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
 	il->stations[sta_id].sta.key.key_flags =
 	    STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
-	il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
+	il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
 	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
 	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 763c7529921b..0f8a7703eea3 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4768,14 +4768,12 @@ il_bg_watchdog(unsigned long data)
 		return;
 	/* monitor and check for other stuck queues */
-	if (il_is_any_associated(il)) {
-		for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
-			/* skip as we already checked the command queue */
-			if (cnt == il->cmd_queue)
-				continue;
-			if (il_check_stuck_queue(il, cnt))
-				return;
-		}
+	for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+		/* skip as we already checked the command queue */
+		if (cnt == il->cmd_queue)
+			continue;
+		if (il_check_stuck_queue(il, cnt))
+			return;
 	}
 	mod_timer(&il->watchdog,
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 4b2733af1a0e..3af88b8cfcb7 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -976,6 +976,7 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
 	case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
 		/* firmware doesn't support this type of hidden SSID */
 	default:
+		kfree(bss_cfg);
 		return -EINVAL;
 	}
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index d357d1ed92f6..74ecc33fdd90 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -436,8 +436,8 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
 	case QID_RX:
 		if (!rt2x00queue_full(queue))
 			rt2x00queue_for_each_entry(queue,
-						   Q_INDEX_DONE,
 						   Q_INDEX,
+						   Q_INDEX_DONE,
 						   NULL,
 						   rt2x00usb_kick_rx_entry);
 		break;
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index a88fb6939387..e1ce1048fe5f 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -78,7 +78,7 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
 	struct net *net = nf_ct_net(ct);
 	struct nf_conntrack_ecache *e;
-	if (net->ct.nf_conntrack_event_cb == NULL)
+	if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
 		return;
 	e = nf_ct_ecache_find(ct);
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 6089f0cf23b4..9096bcb08132 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -403,6 +403,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 		break;
 	case NETDEV_DOWN:
+		if (dev->features & NETIF_F_HW_VLAN_FILTER)
+			vlan_vid_del(dev, 0);
+
 		/* Put all VLANs for this dev in the down state too.  */
 		for (i = 0; i < VLAN_N_VID; i++) {
 			vlandev = vlan_group_get_device(grp, i);
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 3483e4035cbe..6705d35b17ce 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1381,6 +1381,7 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
  * @vid: the VLAN ID of the frame
+ * @is_bcast: the packet came in a broadcast packet type.
  *
  * bla_rx avoidance checks if:
  *  * we have to race for a claim
@@ -1390,7 +1391,8 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
  * returns 1, otherwise it returns 0 and the caller shall further
  * process the skb.
  */
-int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
+int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
+		  bool is_bcast)
 {
 	struct ethhdr *ethhdr;
 	struct batadv_claim search_claim, *claim = NULL;
@@ -1409,7 +1411,7 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
 	if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
 		/* don't allow broadcasts while requests are in flight */
-		if (is_multicast_ether_addr(ethhdr->h_dest))
+		if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
 			goto handled;
 	memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
@@ -1435,8 +1437,13 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
 	}
 	/* if it is a broadcast ... */
-	if (is_multicast_ether_addr(ethhdr->h_dest)) {
-		/* ... drop it. the responsible gateway is in charge. */
+	if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
+		/* ... drop it. the responsible gateway is in charge.
+		 *
+		 * We need to check is_bcast because with the gateway
+		 * feature, broadcasts (like DHCP requests) may be sent
+		 * using a unicast packet type.
+		 */
 		goto handled;
 	} else {
 		/* seems the client considers us as its best gateway.
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 08d13cb1e3df..563cfbf94a7f 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -21,7 +21,8 @@
 #define _NET_BATMAN_ADV_BLA_H_
 #ifdef CONFIG_BATMAN_ADV_BLA
-int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid);
+int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
+		  bool is_bcast);
 int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid);
 int batadv_bla_is_backbone_gw(struct sk_buff *skb,
 			      struct batadv_orig_node *orig_node, int hdr_size);
@@ -40,7 +41,8 @@ void batadv_bla_free(struct batadv_priv *bat_priv);
 #else /* ifdef CONFIG_BATMAN_ADV_BLA */
 static inline int batadv_bla_rx(struct batadv_priv *bat_priv,
-				struct sk_buff *skb, short vid)
+				struct sk_buff *skb, short vid,
+				bool is_bcast)
 {
 	return 0;
 }
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 9e4bb61301ec..109ea2aae96c 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -267,8 +267,12 @@ void batadv_interface_rx(struct net_device *soft_iface,
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
 	struct ethhdr *ethhdr;
 	struct vlan_ethhdr *vhdr;
+	struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
 	short vid __maybe_unused = -1;
 	__be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN);
+	bool is_bcast;
+
+	is_bcast = (batadv_header->packet_type == BATADV_BCAST);
 	/* check if enough space is available for pulling, and pull */
 	if (!pskb_may_pull(skb, hdr_size))
@@ -315,7 +319,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
 	/* Let the bridge loop avoidance check the packet. If will
 	 * not handle it, we can safely push it up.
 	 */
-	if (batadv_bla_rx(bat_priv, skb, vid))
+	if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
 		goto out;
 	netif_rx(skb);
diff --git a/net/core/dev.c b/net/core/dev.c
index 5ab6f4b37c0c..73e87c7b4377 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2457,8 +2457,12 @@ static void skb_update_prio(struct sk_buff *skb)
 {
 	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
-	if ((!skb->priority) && (skb->sk) && map)
-		skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
+	if (!skb->priority && skb->sk && map) {
+		unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
+
+		if (prioidx < map->priomap_len)
+			skb->priority = map->priomap[prioidx];
+	}
 }
 #else
 #define skb_update_prio(skb)
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 5b8aa2fae48b..3e953eaddbfc 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -49,8 +49,9 @@ static int get_prioidx(u32 *prio)
 		return -ENOSPC;
 	}
 	set_bit(prioidx, prioidx_map);
+	if (atomic_read(&max_prioidx) < prioidx)
+		atomic_set(&max_prioidx, prioidx);
 	spin_unlock_irqrestore(&prioidx_map_lock, flags);
-	atomic_set(&max_prioidx, prioidx);
 	*prio = prioidx;
 	return 0;
 }
@@ -141,7 +142,7 @@ static void cgrp_destroy(struct cgroup *cgrp)
 	rtnl_lock();
 	for_each_netdev(&init_net, dev) {
 		map = rtnl_dereference(dev->priomap);
-		if (map)
+		if (map && cs->prioidx < map->priomap_len)
 			map->priomap[cs->prioidx] = 0;
 	}
 	rtnl_unlock();
@@ -165,7 +166,7 @@ static int read_priomap(struct cgroup *cont, struct cftype *cft,
 	rcu_read_lock();
 	for_each_netdev_rcu(&init_net, dev) {
 		map = rcu_dereference(dev->priomap);
-		priority = map ? map->priomap[prioidx] : 0;
+		priority = (map && prioidx < map->priomap_len) ? map->priomap[prioidx] : 0;
 		cb->fill(cb, dev->name, priority);
 	}
 	rcu_read_unlock();
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 6fbb2ad7bb6d..16705611589a 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -230,6 +230,12 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
 	mtu = dev->mtu;
 	pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
+	if (size > mtu) {
+		pr_debug("size = %Zu, mtu = %u\n", size, mtu);
+		err = -EINVAL;
+		goto out_dev;
+	}
+
 	hlen = LL_RESERVED_SPACE(dev);
 	tlen = dev->needed_tailroom;
 	skb = sock_alloc_send_skb(sk, hlen + tlen + size,
@@ -258,12 +264,6 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
 	if (err < 0)
 		goto out_skb;
-	if (size > mtu) {
-		pr_debug("size = %Zu, mtu = %u\n", size, mtu);
-		err = -EINVAL;
-		goto out_skb;
-	}
-
 	skb->dev = dev;
 	skb->sk = sk;
 	skb->protocol = htons(ETH_P_IEEE802154);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index e6fe84a08443..aa69a331f374 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2152,15 +2152,13 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
 			   mgmt->sa, status_code);
 		ieee80211_destroy_assoc_data(sdata, false);
 	} else {
-		sdata_info(sdata, "associated\n");
-
 		if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
 			/* oops -- internal error -- send timeout for now */
-			ieee80211_destroy_assoc_data(sdata, true);
-			sta_info_destroy_addr(sdata, mgmt->bssid);
+			ieee80211_destroy_assoc_data(sdata, false);
 			cfg80211_put_bss(*bss);
 			return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
 		}
+		sdata_info(sdata, "associated\n");
 		/*
 		 * destroy assoc_data afterwards, as otherwise an idle
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 2d1acc6c5445..f9e51ef8dfa2 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -809,7 +809,7 @@ minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
 		max_rates = sband->n_bitrates;
 	}
-	msp = kzalloc(sizeof(struct minstrel_ht_sta), gfp);
+	msp = kzalloc(sizeof(*msp), gfp);
 	if (!msp)
 		return NULL;
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 035960ec5cb9..c6f7db720d84 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -16,6 +16,7 @@
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -310,7 +311,8 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
 			      info->del_set.flags, 0, UINT_MAX);
 	/* Normalize to fit into jiffies */
-	if (add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
+	if (add_opt.timeout != IPSET_NO_TIMEOUT &&
+	    add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
 		add_opt.timeout = UINT_MAX/MSEC_PER_SEC;
 	if (info->add_set.index != IPSET_INVALID_ID)
 		ip_set_add(info->add_set.index, skb, par, &add_opt);
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index 2c0b317344b7..05ca5a680071 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -292,7 +292,7 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
 	pr_debug("%p\n", sk);
-	if (llcp_sock == NULL)
+	if (llcp_sock == NULL || llcp_sock->dev == NULL)
 		return -EBADFD;
 	addr->sa_family = AF_NFC;
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
index 2754f098d436..bebaa43484bc 100644
--- a/net/rxrpc/ar-peer.c
+++ b/net/rxrpc/ar-peer.c
@@ -229,7 +229,7 @@ found_UDP_peer:
 	return peer;
 new_UDP_peer:
-	_net("Rx UDP DGRAM from NEW peer %d", peer->debug_id);
+	_net("Rx UDP DGRAM from NEW peer");
 	read_unlock_bh(&rxrpc_peer_lock);
 	_leave(" = -EBUSY [new]");
 	return ERR_PTR(-EBUSY);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a2a95aabf9c2..c412ad0d0308 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -331,29 +331,22 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
 	return PSCHED_NS2TICKS(ticks);
 }
-static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 {
 	struct sk_buff_head *list = &sch->q;
 	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
-	struct sk_buff *skb;
+	struct sk_buff *skb = skb_peek_tail(list);
-	if (likely(skb_queue_len(list) < sch->limit)) {
-		skb = skb_peek_tail(list);
-		/* Optimize for add at tail */
-		if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
-			return qdisc_enqueue_tail(nskb, sch);
+	/* Optimize for add at tail */
+	if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
+		return __skb_queue_tail(list, nskb);
-		skb_queue_reverse_walk(list, skb) {
-			if (tnext >= netem_skb_cb(skb)->time_to_send)
-				break;
-		}
-
-		__skb_queue_after(list, skb, nskb);
-		sch->qstats.backlog += qdisc_pkt_len(nskb);
-		return NET_XMIT_SUCCESS;
+	skb_queue_reverse_walk(list, skb) {
+		if (tnext >= netem_skb_cb(skb)->time_to_send)
+			break;
 	}
-	return qdisc_reshape_fail(nskb, sch);
+	__skb_queue_after(list, skb, nskb);
 }
 /*
@@ -368,7 +361,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	/* We don't fill cb now as skb_unshare() may invalidate it */
 	struct netem_skb_cb *cb;
 	struct sk_buff *skb2;
-	int ret;
 	int count = 1;
 	/* Random duplication */
@@ -419,6 +411,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
 	}
+	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
+		return qdisc_reshape_fail(skb, sch);
+
+	sch->qstats.backlog += qdisc_pkt_len(skb);
+
 	cb = netem_skb_cb(skb);
 	if (q->gap == 0 ||		/* not doing reordering */
 	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
@@ -450,7 +447,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		cb->time_to_send = now + delay;
 		++q->counter;
-		ret = tfifo_enqueue(skb, sch);
+		tfifo_enqueue(skb, sch);
 	} else {
 		/*
 		 * Do re-ordering by putting one out of N packets at the front
@@ -460,16 +457,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		q->counter = 0;
 		__skb_queue_head(&sch->q, skb);
-		sch->qstats.backlog += qdisc_pkt_len(skb);
 		sch->qstats.requeues++;
-		ret = NET_XMIT_SUCCESS;
-	}
-
-	if (ret != NET_XMIT_SUCCESS) {
-		if (net_xmit_drop_count(ret)) {
-			sch->qstats.drops++;
-			return ret;
-		}
 	}
 	return NET_XMIT_SUCCESS;