Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	net/batman-adv/bridge_loop_avoidance.c
	net/batman-adv/bridge_loop_avoidance.h
	net/batman-adv/soft-interface.c
	net/mac80211/mlme.c

With merge help from Antonio Quartulli (batman-adv) and Stephen
Rothwell (drivers/net/usb/qmi_wwan.c).

The net/mac80211/mlme.c conflict seemed easy enough, accounting for a
conversion to some new tracing macros.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 04c9f416e3
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -123,7 +123,7 @@ static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
 
 		skb_frag_size_set(frag, size);
 		skb->data_len += size;
-		skb->truesize += size;
+		skb->truesize += PAGE_SIZE;
 	} else
 		skb_put(skb, length);
 
@@ -156,14 +156,18 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct sk_buff *skb;
 	int buf_size;
+	int tailroom;
 	u64 *mapping;
 
-	if (ipoib_ud_need_sg(priv->max_ib_mtu))
+	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
 		buf_size = IPOIB_UD_HEAD_SIZE;
-	else
+		tailroom = 128; /* reserve some tailroom for IP/TCP headers */
+	} else {
 		buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+		tailroom = 0;
+	}
 
-	skb = dev_alloc_skb(buf_size + 4);
+	skb = dev_alloc_skb(buf_size + tailroom + 4);
 	if (unlikely(!skb))
 		return NULL;
 
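The truesize change above encodes a general skb accounting rule: when a receive path attaches an entire page as a fragment, the socket must be charged for the whole page, not just the bytes actually used, or memory pressure is underestimated. A minimal sketch of the pattern (illustrative, not the driver's exact code):

	/* Attach a full page as fragment 0. Even if only `size` bytes are
	 * used, the skb pins the whole page, so charge PAGE_SIZE. */
	skb_fill_page_desc(skb, 0, page, 0, size);
	skb->data_len += size;
	skb->truesize += PAGE_SIZE;
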
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -135,8 +135,8 @@ send_layer2(struct mISDNstack *st, struct sk_buff *skb)
 			skb = NULL;
 		else if (*debug & DEBUG_SEND_ERR)
 			printk(KERN_DEBUG
-			       "%s ch%d mgr prim(%x) addr(%x) err %d\n",
-			       __func__, ch->nr, hh->prim, ch->addr, ret);
+			       "%s mgr prim(%x) err %d\n",
+			       __func__, hh->prim, ret);
 	}
 out:
 	mutex_unlock(&st->lmutex);
 
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -6,7 +6,7 @@
 #include "bonding.h"
 #include "bond_alb.h"
 
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS)
 
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3230,6 +3230,12 @@ static int bond_master_netdev_event(unsigned long event,
 	switch (event) {
 	case NETDEV_CHANGENAME:
 		return bond_event_changename(event_bond);
+	case NETDEV_UNREGISTER:
+		bond_remove_proc_entry(event_bond);
+		break;
+	case NETDEV_REGISTER:
+		bond_create_proc_entry(event_bond);
+		break;
 	default:
 		break;
 	}
@@ -4414,8 +4420,6 @@ static void bond_uninit(struct net_device *bond_dev)
 
 	bond_work_cancel_all(bond);
 
-	bond_remove_proc_entry(bond);
-
 	bond_debug_unregister(bond);
 
 	__hw_addr_flush(&bond->mc_list);
@@ -4817,7 +4821,6 @@ static int bond_init(struct net_device *bond_dev)
 
 	bond_set_lockdep_class(bond_dev);
 
-	bond_create_proc_entry(bond);
 	list_add_tail(&bond->bond_list, &bn->dev_list);
 
 	bond_prepare_sysfs_group(bond);
 
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -261,7 +261,6 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
 	if ((phy_data & BMSR_LSTATUS) == 0) {
 		/* link down */
 		netif_carrier_off(netdev);
-		netif_stop_queue(netdev);
 		hw->hibernate = true;
 		if (atl1c_reset_mac(hw) != 0)
 			if (netif_msg_hw(adapter))
 
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -656,7 +656,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 		dma_unmap_single(bp->sdev->dma_dev, mapping,
 				 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
-		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
+		skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
 		if (skb == NULL)
 			return -ENOMEM;
 		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
@@ -967,7 +967,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		dma_unmap_single(bp->sdev->dma_dev, mapping, len,
 				 DMA_TO_DEVICE);
 
-		bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
+		bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb)
 			goto err_out;
 
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -5377,7 +5377,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
 		int k, last;
 
 		if (skb == NULL) {
-			j++;
+			j = NEXT_TX_BD(j);
 			continue;
 		}
 
@@ -5389,8 +5389,8 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
 		tx_buf->skb = NULL;
 
 		last = tx_buf->nr_frags;
-		j++;
-		for (k = 0; k < last; k++, j++) {
+		j = NEXT_TX_BD(j);
+		for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
 			tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
 			dma_unmap_page(&bp->pdev->dev,
 				dma_unmap_addr(tx_buf, mapping),
 
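The bnx2 fix replaces bare `j++` with `NEXT_TX_BD(j)` because the TX ring reserves descriptors for page chaining, so a plain increment can land on one of those reserved slots. The shape of such an advance helper, as a hypothetical sketch (bnx2's real macro differs in its constants and layout):

	/* Hypothetical ring layout: PER_PAGE usable descriptors followed by
	 * one link slot per page; skip the link slot when advancing. */
	#define RING_NEXT(idx) \
		((((idx) % (PER_PAGE + 1)) == PER_PAGE - 1) ? (idx) + 2 : (idx) + 1)
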
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -542,7 +542,8 @@ int cnic_unregister_driver(int ulp_type)
 	}
 
 	if (atomic_read(&ulp_ops->ref_count) != 0)
-		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
+		pr_warn("%s: Failed waiting for ref count to go to zero\n",
+			__func__);
 	return 0;
 
 out_unlock:
 
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2077,10 +2077,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			return NETDEV_TX_OK;
 		}
 
-		/* Steal sock reference for processing TX time stamps */
-		swap(skb_new->sk, skb->sk);
-		swap(skb_new->destructor, skb->destructor);
-		kfree_skb(skb);
+		if (skb->sk)
+			skb_set_owner_w(skb_new, skb->sk);
+		consume_skb(skb);
 		skb = skb_new;
 	}
 
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6640,6 +6640,11 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 		return -EINVAL;
 	}
 
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+		e_err(drv, "Enable failed, SR-IOV enabled\n");
+		return -EINVAL;
+	}
+
 	/* Hardware supports up to 8 traffic classes */
 	if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
 	    (hw->mac.type == ixgbe_mac_82598EB &&
 
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -51,7 +51,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
 		priv->hw->desc->prepare_tx_desc(desc, 1, bmax,
 						csum);
-
+		wmb();
 		entry = (++priv->cur_tx) % txsize;
 		desc = priv->dma_tx + entry;
@@ -59,6 +59,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 					    len, DMA_TO_DEVICE);
 		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
 		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum);
+		wmb();
 		priv->hw->desc->set_tx_owner(desc);
 		priv->tx_skbuff[entry] = NULL;
 	} else {
 
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1334,6 +1334,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 			priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
+			wmb();
 			priv->hw->desc->set_tx_owner(desc);
 			wmb();
 		}
 
 		/* Interrupt on completition only for the latest segment */
@@ -1349,6 +1350,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* To avoid raise condition */
 	priv->hw->desc->set_tx_owner(first);
+	wmb();
 
 	priv->cur_tx++;
 
@@ -1412,6 +1414,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 			}
+			wmb();
 			priv->hw->desc->set_rx_owner(p + entry);
 			wmb();
 		}
 	}
 
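Each of these wmb() calls enforces the standard DMA-descriptor hand-off discipline: every field of a descriptor must be visible to the device before the ownership bit is flipped to hardware. In sketch form, with illustrative field names rather than the driver's:

	/* Publish a TX descriptor to the DMA engine (sketch). The barrier
	 * orders the field stores before the OWN store, so the device can
	 * never fetch a half-initialized descriptor. */
	desc->buf  = cpu_to_le32(dma_addr);
	desc->ctrl = cpu_to_le32(len);
	wmb();				/* fields first ... */
	desc->own  = cpu_to_le32(1);	/* ... then hand it to hardware */
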
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -46,7 +46,13 @@ static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum)
 	struct mdio_mux_parent_bus *pb = cb->parent;
 	int r;
 
-	mutex_lock(&pb->mii_bus->mdio_lock);
+	/* In theory multiple mdio_mux could be stacked, thus creating
+	 * more than a single level of nesting.  But in practice,
+	 * SINGLE_DEPTH_NESTING will cover the vast majority of use
+	 * cases.  We use it, instead of trying to handle the general
+	 * case.
+	 */
+	mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
 	r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
 	if (r)
 		goto out;
@@ -71,7 +77,7 @@ static int mdio_mux_write(struct mii_bus *bus, int phy_id,
 
 	int r;
 
-	mutex_lock(&pb->mii_bus->mdio_lock);
+	mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
 	r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
 	if (r)
 		goto out;
 
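Taking a mutex whose lock class may already be held higher up the call chain trips lockdep even when the instances are distinct; mutex_lock_nested() annotates the acquisition with a subclass so lockdep can tell the levels apart, and SINGLE_DEPTH_NESTING (subclass 1) covers one level of stacking. A generic sketch of the pattern, assuming a parent/child pair sharing a lock class:

	mutex_lock(&parent->lock);
	/* inner instance of the same class: annotate, don't just lock */
	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
	/* ... operate on the child through the parent ... */
	mutex_unlock(&child->lock);
	mutex_unlock(&parent->lock);
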
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -374,6 +374,15 @@ static const struct driver_info qmi_wwan_force_int1 = {
 	.data		= BIT(1), /* interface whitelist bitmap */
 };
 
+static const struct driver_info qmi_wwan_force_int2 = {
+	.description	= "Qualcomm WWAN/QMI device",
+	.flags		= FLAG_WWAN,
+	.bind		= qmi_wwan_bind_shared,
+	.unbind		= qmi_wwan_unbind,
+	.manage_power	= qmi_wwan_manage_power,
+	.data		= BIT(2), /* interface whitelist bitmap */
+};
+
 static const struct driver_info qmi_wwan_force_int3 = {
 	.description	= "Qualcomm WWAN/QMI device",
 	.flags		= FLAG_WWAN,
@@ -526,6 +535,15 @@ static const struct usb_device_id products[] = {
 		.bInterfaceProtocol = 0xff,
 		.driver_info        = (unsigned long)&qmi_wwan_force_int4,
 	},
+	{	/* ZTE MF60 */
+		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
+		.idVendor           = 0x19d2,
+		.idProduct          = 0x1402,
+		.bInterfaceClass    = 0xff,
+		.bInterfaceSubClass = 0xff,
+		.bInterfaceProtocol = 0xff,
+		.driver_info        = (unsigned long)&qmi_wwan_force_int2,
+	},
 	{	/* Sierra Wireless MC77xx in QMI mode */
 		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
 		.idVendor           = 0x1199,
 
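On these devices several USB interfaces share one vendor-specific class, and `.data` is used as a bitmap naming the interface number that actually speaks QMI. Roughly how a shared bind function can consume it (a sketch; qmi_wwan's real helper differs in detail):

	static int wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf)
	{
		unsigned long whitelist = dev->driver_info->data;
		u8 num = intf->cur_altsetting->desc.bInterfaceNumber;

		if (!test_bit(num, &whitelist))
			return -ENODEV;	/* not the QMI function */
		/* ... continue with normal setup ... */
		return 0;
	}
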
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -1072,7 +1072,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
 	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
 	/* create a bounce buffer in zone_dma on mapping failure. */
 	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
-		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
+		bounce_skb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb) {
 			ring->current_slot = old_top_slot;
 			ring->used_slots = old_used_slots;
 
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -3405,7 +3405,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
 		return 0;
 	}
 
-	if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+	if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
 		IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
 			key_flags);
 		spin_unlock_irqrestore(&il->sta_lock, flags);
@@ -3420,7 +3420,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
 	memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
 	il->stations[sta_id].sta.key.key_flags =
 	    STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
-	il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
+	il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
 	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
 	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
 
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4768,14 +4768,12 @@ il_bg_watchdog(unsigned long data)
 		return;
 
 	/* monitor and check for other stuck queues */
-	if (il_is_any_associated(il)) {
-		for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
-			/* skip as we already checked the command queue */
-			if (cnt == il->cmd_queue)
-				continue;
-			if (il_check_stuck_queue(il, cnt))
-				return;
-		}
+	for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+		/* skip as we already checked the command queue */
+		if (cnt == il->cmd_queue)
+			continue;
+		if (il_check_stuck_queue(il, cnt))
+			return;
 	}
 
 	mod_timer(&il->watchdog,
 
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -976,6 +976,7 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
 	case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
 		/* firmware doesn't support this type of hidden SSID */
+	default:
 		kfree(bss_cfg);
 		return -EINVAL;
 	}
 
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -436,8 +436,8 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
 	case QID_RX:
 		if (!rt2x00queue_full(queue))
 			rt2x00queue_for_each_entry(queue,
+						   Q_INDEX_DONE,
 						   Q_INDEX,
-						   Q_INDEX_DONE,
 						   NULL,
 						   rt2x00usb_kick_rx_entry);
 		break;
 
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -78,7 +78,7 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
 	struct net *net = nf_ct_net(ct);
 	struct nf_conntrack_ecache *e;
 
-	if (net->ct.nf_conntrack_event_cb == NULL)
+	if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
 		return;
 
 	e = nf_ct_ecache_find(ct);
 
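rcu_dereference() outside a read-side critical section triggers a lockdep-RCU warning, but this call site only tests the pointer for NULL and never dereferences it, which is exactly what rcu_access_pointer() is for. The split looks like this (hypothetical `obj`/`cb` names):

	/* NULL-check only: no dereference, no rcu_read_lock() needed. */
	if (!rcu_access_pointer(obj->callback))
		return;

	/* Actually using the pointer still needs the full protocol. */
	rcu_read_lock();
	cb = rcu_dereference(obj->callback);
	if (cb)
		cb->notify(obj);
	rcu_read_unlock();
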
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -403,6 +403,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 		break;
 
 	case NETDEV_DOWN:
+		if (dev->features & NETIF_F_HW_VLAN_FILTER)
+			vlan_vid_del(dev, 0);
+
 		/* Put all VLANs for this dev in the down state too. */
 		for (i = 0; i < VLAN_N_VID; i++) {
 			vlandev = vlan_group_get_device(grp, i);
 
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1381,6 +1381,7 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
  * @vid: the VLAN ID of the frame
+ * @is_bcast: the packet came in a broadcast packet type.
  *
  * bla_rx avoidance checks if:
  *  * we have to race for a claim
@@ -1390,7 +1391,8 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
  * returns 1, otherwise it returns 0 and the caller shall further
  * process the skb.
  */
-int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
+int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
+		  bool is_bcast)
 {
 	struct ethhdr *ethhdr;
 	struct batadv_claim search_claim, *claim = NULL;
@@ -1409,7 +1411,7 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
 
 	if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
 		/* don't allow broadcasts while requests are in flight */
-		if (is_multicast_ether_addr(ethhdr->h_dest))
+		if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
 			goto handled;
 
 	memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
@@ -1435,8 +1437,13 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
 	}
 
 	/* if it is a broadcast ... */
-	if (is_multicast_ether_addr(ethhdr->h_dest)) {
-		/* ... drop it. the responsible gateway is in charge. */
+	if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
+		/* ... drop it. the responsible gateway is in charge.
+		 *
+		 * We need to check is_bcast because with the gateway
+		 * feature, broadcasts (like DHCP requests) may be sent
+		 * using a unicast packet type.
+		 */
 		goto handled;
 	} else {
 		/* seems the client considers us as its best gateway.
 
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -21,7 +21,8 @@
 #define _NET_BATMAN_ADV_BLA_H_
 
 #ifdef CONFIG_BATMAN_ADV_BLA
-int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid);
+int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
+		  bool is_bcast);
 int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid);
 int batadv_bla_is_backbone_gw(struct sk_buff *skb,
 			      struct batadv_orig_node *orig_node, int hdr_size);
@@ -40,7 +41,8 @@ void batadv_bla_free(struct batadv_priv *bat_priv);
 #else /* ifdef CONFIG_BATMAN_ADV_BLA */
 
 static inline int batadv_bla_rx(struct batadv_priv *bat_priv,
-				struct sk_buff *skb, short vid)
+				struct sk_buff *skb, short vid,
+				bool is_bcast)
 {
 	return 0;
 }
 
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -267,8 +267,12 @@ void batadv_interface_rx(struct net_device *soft_iface,
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
 	struct ethhdr *ethhdr;
 	struct vlan_ethhdr *vhdr;
+	struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
 	short vid __maybe_unused = -1;
 	__be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN);
+	bool is_bcast;
+
+	is_bcast = (batadv_header->packet_type == BATADV_BCAST);
 
 	/* check if enough space is available for pulling, and pull */
 	if (!pskb_may_pull(skb, hdr_size))
@@ -315,7 +319,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
 	/* Let the bridge loop avoidance check the packet. If will
 	 * not handle it, we can safely push it up.
 	 */
-	if (batadv_bla_rx(bat_priv, skb, vid))
+	if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
 		goto out;
 
 	netif_rx(skb);
 
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2457,8 +2457,12 @@ static void skb_update_prio(struct sk_buff *skb)
 {
 	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
 
-	if ((!skb->priority) && (skb->sk) && map)
-		skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
+	if (!skb->priority && skb->sk && map) {
+		unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
+
+		if (prioidx < map->priomap_len)
+			skb->priority = map->priomap[prioidx];
+	}
 }
 #else
 #define skb_update_prio(skb)
 
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -49,8 +49,9 @@ static int get_prioidx(u32 *prio)
 		return -ENOSPC;
 	}
 	set_bit(prioidx, prioidx_map);
+	if (atomic_read(&max_prioidx) < prioidx)
+		atomic_set(&max_prioidx, prioidx);
 	spin_unlock_irqrestore(&prioidx_map_lock, flags);
-	atomic_set(&max_prioidx, prioidx);
 	*prio = prioidx;
 	return 0;
 }
@@ -141,7 +142,7 @@ static void cgrp_destroy(struct cgroup *cgrp)
 	rtnl_lock();
 	for_each_netdev(&init_net, dev) {
 		map = rtnl_dereference(dev->priomap);
-		if (map)
+		if (map && cs->prioidx < map->priomap_len)
 			map->priomap[cs->prioidx] = 0;
 	}
 	rtnl_unlock();
@@ -165,7 +166,7 @@ static int read_priomap(struct cgroup *cont, struct cftype *cft,
 	rcu_read_lock();
 	for_each_netdev_rcu(&init_net, dev) {
 		map = rcu_dereference(dev->priomap);
-		priority = map ? map->priomap[prioidx] : 0;
+		priority = (map && prioidx < map->priomap_len) ? map->priomap[prioidx] : 0;
 		cb->fill(cb, dev->name, priority);
 	}
 	rcu_read_unlock();
 
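The dev.c hunk and these three close the same hole: dev->priomap is a flexible array that gets reallocated as cgroup priority indices grow, so every reader must bound the index by the map's current priomap_len. Reduced to a standalone sketch (illustrative names):

	struct prio_map {
		u32 len;
		u32 prio[];	/* reallocated as indices grow */
	};

	static u32 map_lookup(const struct prio_map *map, u32 idx)
	{
		/* never index past the length the map was allocated with */
		return (map && idx < map->len) ? map->prio[idx] : 0;
	}
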
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -230,6 +230,12 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
 	mtu = dev->mtu;
 	pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
 
+	if (size > mtu) {
+		pr_debug("size = %Zu, mtu = %u\n", size, mtu);
+		err = -EINVAL;
+		goto out_dev;
+	}
+
 	hlen = LL_RESERVED_SPACE(dev);
 	tlen = dev->needed_tailroom;
 	skb = sock_alloc_send_skb(sk, hlen + tlen + size,
@@ -258,12 +264,6 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
 	if (err < 0)
 		goto out_skb;
 
-	if (size > mtu) {
-		pr_debug("size = %Zu, mtu = %u\n", size, mtu);
-		err = -EINVAL;
-		goto out_skb;
-	}
-
 	skb->dev = dev;
 	skb->sk = sk;
 	skb->protocol = htons(ETH_P_IEEE802154);
 
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2152,15 +2152,13 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
 			   mgmt->sa, status_code);
 		ieee80211_destroy_assoc_data(sdata, false);
 	} else {
-		sdata_info(sdata, "associated\n");
-
 		if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
 			/* oops -- internal error -- send timeout for now */
-			ieee80211_destroy_assoc_data(sdata, true);
+			sta_info_destroy_addr(sdata, mgmt->bssid);
+			ieee80211_destroy_assoc_data(sdata, false);
 			cfg80211_put_bss(*bss);
 			return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
 		}
+		sdata_info(sdata, "associated\n");
 
 		/*
 		 * destroy assoc_data afterwards, as otherwise an idle
 
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -809,7 +809,7 @@ minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
 		max_rates = sband->n_bitrates;
 	}
 
-	msp = kzalloc(sizeof(struct minstrel_ht_sta), gfp);
+	msp = kzalloc(sizeof(*msp), gfp);
 	if (!msp)
 		return NULL;
 
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -16,6 +16,7 @@
 
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -310,7 +311,8 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
 		  info->del_set.flags, 0, UINT_MAX);
 
 	/* Normalize to fit into jiffies */
-	if (add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
+	if (add_opt.timeout != IPSET_NO_TIMEOUT &&
+	    add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
 		add_opt.timeout = UINT_MAX/MSEC_PER_SEC;
 	if (info->add_set.index != IPSET_INVALID_ID)
 		ip_set_add(info->add_set.index, skb, par, &add_opt);
 
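The clamp exists to keep a millisecond timeout from overflowing the conversion to jiffies, but IPSET_NO_TIMEOUT is a special marker large enough to satisfy the overflow test, so clamping it would silently turn "no timeout" into a finite one. In isolation (sketch, assuming the sentinel is a large marker value as here):

	/* Clamp real timeouts for the ms-to-jiffies conversion, but leave
	 * the "no timeout" sentinel untouched. */
	if (timeout != NO_TIMEOUT && timeout > UINT_MAX / MSEC_PER_SEC)
		timeout = UINT_MAX / MSEC_PER_SEC;
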
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -292,7 +292,7 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
 
 	pr_debug("%p\n", sk);
 
-	if (llcp_sock == NULL)
+	if (llcp_sock == NULL || llcp_sock->dev == NULL)
 		return -EBADFD;
 
 	addr->sa_family = AF_NFC;
 
--- a/net/rxrpc/ar-peer.c
+++ b/net/rxrpc/ar-peer.c
@@ -229,7 +229,7 @@ found_UDP_peer:
 	return peer;
 
 new_UDP_peer:
-	_net("Rx UDP DGRAM from NEW peer %d", peer->debug_id);
+	_net("Rx UDP DGRAM from NEW peer");
 	read_unlock_bh(&rxrpc_peer_lock);
 	_leave(" = -EBUSY [new]");
 	return ERR_PTR(-EBUSY);
 
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -331,29 +331,22 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
 	return PSCHED_NS2TICKS(ticks);
 }
 
-static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 {
 	struct sk_buff_head *list = &sch->q;
 	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
-	struct sk_buff *skb;
+	struct sk_buff *skb = skb_peek_tail(list);
 
-	if (likely(skb_queue_len(list) < sch->limit)) {
-		skb = skb_peek_tail(list);
-		/* Optimize for add at tail */
-		if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
-			return qdisc_enqueue_tail(nskb, sch);
+	/* Optimize for add at tail */
+	if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
+		return __skb_queue_tail(list, nskb);
 
-		skb_queue_reverse_walk(list, skb) {
-			if (tnext >= netem_skb_cb(skb)->time_to_send)
-				break;
-		}
+	skb_queue_reverse_walk(list, skb) {
+		if (tnext >= netem_skb_cb(skb)->time_to_send)
+			break;
+	}
 
-		__skb_queue_after(list, skb, nskb);
-		sch->qstats.backlog += qdisc_pkt_len(nskb);
-		return NET_XMIT_SUCCESS;
-	}
-
-	return qdisc_reshape_fail(nskb, sch);
+	__skb_queue_after(list, skb, nskb);
 }
 
@@ -368,7 +361,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	/* We don't fill cb now as skb_unshare() may invalidate it */
 	struct netem_skb_cb *cb;
 	struct sk_buff *skb2;
-	int ret;
 	int count = 1;
 
 	/* Random duplication */
@@ -419,6 +411,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
 	}
 
+	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
+		return qdisc_reshape_fail(skb, sch);
+
+	sch->qstats.backlog += qdisc_pkt_len(skb);
+
 	cb = netem_skb_cb(skb);
 	if (q->gap == 0 ||		/* not doing reordering */
 	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
@@ -450,7 +447,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 		cb->time_to_send = now + delay;
 		++q->counter;
-		ret = tfifo_enqueue(skb, sch);
+		tfifo_enqueue(skb, sch);
 	} else {
 		/*
 		 * Do re-ordering by putting one out of N packets at the front
@@ -460,16 +457,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		q->counter = 0;
 
 		__skb_queue_head(&sch->q, skb);
-		sch->qstats.backlog += qdisc_pkt_len(skb);
 		sch->qstats.requeues++;
-		ret = NET_XMIT_SUCCESS;
 	}
 
-	if (ret != NET_XMIT_SUCCESS) {
-		if (net_xmit_drop_count(ret)) {
-			sch->qstats.drops++;
-			return ret;
-		}
-	}
-
 	return NET_XMIT_SUCCESS;
 
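tfifo_enqueue() keeps netem's delay line ordered by time_to_send, testing the tail first because timestamps are usually non-decreasing and only walking backwards when a packet is being reordered; the queue-limit check and backlog accounting move to the single caller, which is why the function can become void. The insert logic, reduced to a standalone C sketch on a circular doubly linked list with a sentinel head:

	struct node {
		struct node *prev, *next;
		long long time_to_send;
	};

	/* Insert n keeping the list sorted (head->next = soonest).
	 * Fast path: new entries usually belong at the tail. */
	static void tfifo_insert(struct node *head, struct node *n)
	{
		struct node *pos = head->prev;	/* tail of the circular list */

		/* walk back past entries scheduled later than n */
		while (pos != head && pos->time_to_send > n->time_to_send)
			pos = pos->prev;

		n->prev = pos;
		n->next = pos->next;
		pos->next->prev = n;
		pos->next = n;
	}
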