Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix packet header offset calculation in _decode_session6(), from Hajime Tazaki.

 2) Fix route leak in error paths of xfrm_lookup(), from Huaibin Wang.

 3) Be sure to clear state properly when scans fail in iwlwifi mvm code, from Luciano Coelho.

 4) iwlwifi tries to stop scans that aren't actually running, also from Luciano Coelho.

 5) mac80211 should drop mesh frames that are not encrypted, fix from Bob Copeland.

 6) Add new device ID to b43 wireless driver for BCM43228 chips, from Rafał Miłecki.

 7) Fix accidental addition of members after variable sized array in struct tc_u_hnode, from WANG Cong.

 8) Don't re-enable interrupts until after we call napi_complete() in ibmveth and WIZnet drivers, from Yongbae Park.

 9) Fix regression in vlan tag handling of fec driver, from Fugang Duan.

10) If a network namespace change fails during rtnl_newlink(), we don't unwind the device registry properly.

11) Fix two TCP regressions, from Neal Cardwell:
    - Don't allow snd_cwnd_cnt to accumulate huge values due to missing test in tcp_cong_avoid_ai().
    - Restore CUBIC back to advancing cwnd by 1.5x packets per RTT.

12) Fix performance regression in xen-netback involving push TX notifications, from David Vrabel.

13) __skb_tstamp_tx() can be called with a NULL sk pointer, do not dereference blindly. From Willem de Bruijn.

14) Fix potential stack overflow in RDS protocol stack, from Arnd Bergmann.

15) VXLAN_VID_MASK used incorrectly in new remote checksum offload support of VXLAN driver. Fix from Alexey Kodanev.

16) Fix too small netlink SKB allocation in inet_diag layer, from Eric Dumazet.

17) ieee80211_check_combinations() does not count interfaces correctly, from Andrei Otcheretianski.

18) Hardware feature determination in bnx2x driver references a piece of software state that actually isn't initialized yet, fix from Michal Schmidt.

19) inet_csk_wait_for_connect() needs a sched_annotate_sleep() annotation, from Eric Dumazet.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (56 commits)
  Revert "net: cx82310_eth: use common match macro"
  net/mlx4_en: Set statistics bitmap at port init
  IB/mlx4: Saturate RoCE port PMA counters in case of overflow
  net/mlx4_en: Fix off-by-one in ethtool statistics display
  IB/mlx4: Verify net device validity on port change event
  act_bpf: allow non-default TC_ACT opcodes as BPF exec outcome
  Revert "smc91x: retrieve IRQ and trigger flags in a modern way"
  inet: Clean up inet_csk_wait_for_connect() vs. might_sleep()
  ip6_tunnel: fix error code when tunnel exists
  netdevice.h: fix ndo_bridge_* comments
  bnx2x: fix encapsulation features on 57710/57711
  mac80211: ignore CSA to same channel
  nl80211: ignore HT/VHT capabilities without QoS/WMM
  mac80211: ask for ECSA IE to be considered for beacon parse CRC
  mac80211: count interfaces correctly for combination checks
  isdn: icn: use strlcpy() when parsing setup options
  rxrpc: bogus MSG_PEEK test in rxrpc_recvmsg()
  caif: fix MSG_OOB test in caif_seqpkt_recvmsg()
  bridge: reset bridge mtu after deleting an interface
  can: kvaser_usb: Fix tx queue start/stop race conditions
  ...
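Item 8 above, together with the w5100/w5300 and ibmveth hunks further down, enforces the same NAPI ordering rule: mark the poll finished with napi_complete() before re-enabling the device interrupt, then re-check for events that raced in. A minimal sketch of that ordering, assuming hypothetical my_* device helpers around the real NAPI core calls:

  /* NAPI poll callback: hedged skeleton, not any specific driver. */
  static int my_poll(struct napi_struct *napi, int budget)
  {
  	struct my_priv *priv = container_of(napi, struct my_priv, napi);
  	int work_done = my_process_rx(priv, budget);	/* device RX loop */

  	if (work_done < budget) {
  		/* Finish the poll first... */
  		napi_complete(napi);
  		/* ...and only then let the device interrupt again. */
  		my_enable_irq(priv);

  		/* An event may have slipped in between; pick it up. */
  		if (my_rx_pending(priv) && napi_reschedule(napi))
  			my_disable_irq(priv);
  	}

  	return work_done;
  }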
commit 47226fe1b5
@@ -1741,7 +1741,7 @@ S:	Maintained
 F:	drivers/net/ethernet/atheros/
 
 ATM
-M:	Chas Williams <chas@cmf.nrl.navy.mil>
+M:	Chas Williams <3chas3@gmail.com>
 L:	linux-atm-general@lists.sourceforge.net (moderated for non-subscribers)
 L:	netdev@vger.kernel.org
 W:	http://linux-atm.sourceforge.net
@@ -64,6 +64,14 @@ enum {
 #define GUID_TBL_BLK_NUM_ENTRIES 8
 #define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
 
+/* Counters should be saturated once they reach their maximum value */
+#define ASSIGN_32BIT_COUNTER(counter, value) do {	\
+	if ((value) > U32_MAX)				\
+		counter = cpu_to_be32(U32_MAX);		\
+	else						\
+		counter = cpu_to_be32(value);		\
+} while (0)
+
 struct mlx4_mad_rcv_buf {
 	struct ib_grh grh;
 	u8 payload[256];
@@ -806,10 +814,14 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 static void edit_counter(struct mlx4_counter *cnt,
 			 struct ib_pma_portcounters *pma_cnt)
 {
-	pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2));
-	pma_cnt->port_rcv_data = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2));
-	pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames));
-	pma_cnt->port_rcv_packets = cpu_to_be32(be64_to_cpu(cnt->rx_frames));
+	ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
+			     (be64_to_cpu(cnt->tx_bytes) >> 2));
+	ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
+			     (be64_to_cpu(cnt->rx_bytes) >> 2));
+	ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
+			     be64_to_cpu(cnt->tx_frames));
+	ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
+			     be64_to_cpu(cnt->rx_frames));
 }
 
 static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
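The two hunks above clamp 64-bit mlx4 hardware counters before truncating them into the 32-bit big-endian PMA fields. A standalone sketch of the same saturation step in plain C (the kernel's cpu_to_be32() byte swap is deliberately left out):

  #include <stdint.h>
  #include <stdio.h>

  /* Saturate instead of silently wrapping at 2^32. */
  static uint32_t saturate_u32(uint64_t value)
  {
  	return value > UINT32_MAX ? UINT32_MAX : (uint32_t)value;
  }

  int main(void)
  {
  	uint64_t tx_bytes = 0x100000005ULL;	/* 2^32 + 5: would wrap to 5 */

  	/* port_xmit_data counts 32-bit words, hence the >> 2. */
  	printf("0x%08x\n", saturate_u32(tx_bytes >> 2));	/* 0x40000001 */
  	printf("0x%08x\n", saturate_u32(tx_bytes));		/* 0xffffffff */
  	return 0;
  }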
@@ -2697,8 +2697,12 @@ static void handle_bonded_port_state_event(struct work_struct *work)
 	spin_lock_bh(&ibdev->iboe.lock);
 	for (i = 0; i < MLX4_MAX_PORTS; ++i) {
 		struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
-		enum ib_port_state curr_port_state =
+		enum ib_port_state curr_port_state;
+
+		if (!curr_netdev)
+			continue;
+
+		curr_port_state =
 			(netif_running(curr_netdev) &&
 			 netif_carrier_ok(curr_netdev)) ?
 				IB_PORT_ACTIVE : IB_PORT_DOWN;
@@ -1609,7 +1609,7 @@ icn_setup(char *line)
 	if (ints[0] > 1)
 		membase = (unsigned long)ints[2];
 	if (str && *str) {
-		strcpy(sid, str);
+		strlcpy(sid, str, sizeof(sid));
 		icn_id = sid;
 		if ((p = strchr(sid, ','))) {
 			*p++ = 0;
@@ -131,7 +131,7 @@ config CAN_RCAR
 
 config CAN_XILINXCAN
 	tristate "Xilinx CAN"
-	depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST
+	depends on ARCH_ZYNQ || ARM64 || MICROBLAZE || COMPILE_TEST
 	depends on COMMON_CLK && HAS_IOMEM
 	---help---
 	  Xilinx CAN driver. This driver supports both soft AXI CAN IP and
@@ -14,6 +14,7 @@
  * Copyright (C) 2015 Valeo S.A.
  */
 
+#include <linux/spinlock.h>
 #include <linux/kernel.h>
 #include <linux/completion.h>
 #include <linux/module.h>
@@ -467,10 +468,11 @@ struct kvaser_usb {
 struct kvaser_usb_net_priv {
 	struct can_priv can;
 
-	atomic_t active_tx_urbs;
-	struct usb_anchor tx_submitted;
+	spinlock_t tx_contexts_lock;
+	int active_tx_contexts;
 	struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS];
 
+	struct usb_anchor tx_submitted;
 	struct completion start_comp, stop_comp;
 
 	struct kvaser_usb *dev;
@@ -694,6 +696,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
 	struct kvaser_usb_net_priv *priv;
 	struct sk_buff *skb;
 	struct can_frame *cf;
+	unsigned long flags;
 	u8 channel, tid;
 
 	channel = msg->u.tx_acknowledge_header.channel;
@@ -737,12 +740,15 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
 
 	stats->tx_packets++;
 	stats->tx_bytes += context->dlc;
-	can_get_echo_skb(priv->netdev, context->echo_index);
 
-	context->echo_index = MAX_TX_URBS;
-	atomic_dec(&priv->active_tx_urbs);
+	spin_lock_irqsave(&priv->tx_contexts_lock, flags);
 
-	netif_wake_queue(priv->netdev);
+	can_get_echo_skb(priv->netdev, context->echo_index);
+	context->echo_index = MAX_TX_URBS;
+	--priv->active_tx_contexts;
+	netif_wake_queue(priv->netdev);
+
+	spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
 }
 
 static void kvaser_usb_simple_msg_callback(struct urb *urb)
@@ -803,17 +809,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
 	return 0;
 }
 
-static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
-{
-	int i;
-
-	usb_kill_anchored_urbs(&priv->tx_submitted);
-	atomic_set(&priv->active_tx_urbs, 0);
-
-	for (i = 0; i < MAX_TX_URBS; i++)
-		priv->tx_contexts[i].echo_index = MAX_TX_URBS;
-}
-
 static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
 						 const struct kvaser_usb_error_summary *es,
 						 struct can_frame *cf)
@@ -1515,6 +1510,24 @@ error:
 	return err;
 }
 
+static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
+{
+	int i;
+
+	priv->active_tx_contexts = 0;
+	for (i = 0; i < MAX_TX_URBS; i++)
+		priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+}
+
+/* This method might sleep. Do not call it in the atomic context
+ * of URB completions.
+ */
+static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
+{
+	usb_kill_anchored_urbs(&priv->tx_submitted);
+	kvaser_usb_reset_tx_urb_contexts(priv);
+}
+
 static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev)
 {
 	int i;
@@ -1634,6 +1647,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
 	struct kvaser_msg *msg;
 	int i, err, ret = NETDEV_TX_OK;
 	u8 *msg_tx_can_flags = NULL;		/* GCC */
+	unsigned long flags;
 
 	if (can_dropped_invalid_skb(netdev, skb))
 		return NETDEV_TX_OK;
@@ -1687,12 +1701,21 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
 	if (cf->can_id & CAN_RTR_FLAG)
 		*msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
 
+	spin_lock_irqsave(&priv->tx_contexts_lock, flags);
 	for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) {
 		if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
 			context = &priv->tx_contexts[i];
+
+			context->echo_index = i;
+			can_put_echo_skb(skb, netdev, context->echo_index);
+			++priv->active_tx_contexts;
+			if (priv->active_tx_contexts >= MAX_TX_URBS)
+				netif_stop_queue(netdev);
+
 			break;
 		}
 	}
+	spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
 
 	/* This should never happen; it implies a flow control bug */
 	if (!context) {
@@ -1704,7 +1727,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
 	}
 
 	context->priv = priv;
-	context->echo_index = i;
 	context->dlc = cf->can_dlc;
 
 	msg->u.tx_can.tid = context->echo_index;
@@ -1716,18 +1738,17 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
 			  kvaser_usb_write_bulk_callback, context);
 	usb_anchor_urb(urb, &priv->tx_submitted);
 
-	can_put_echo_skb(skb, netdev, context->echo_index);
-
-	atomic_inc(&priv->active_tx_urbs);
-
-	if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
-		netif_stop_queue(netdev);
-
 	err = usb_submit_urb(urb, GFP_ATOMIC);
 	if (unlikely(err)) {
-		can_free_echo_skb(netdev, context->echo_index);
+		spin_lock_irqsave(&priv->tx_contexts_lock, flags);
 
-		atomic_dec(&priv->active_tx_urbs);
+		can_free_echo_skb(netdev, context->echo_index);
+		context->echo_index = MAX_TX_URBS;
+		--priv->active_tx_contexts;
+		netif_wake_queue(netdev);
+
+		spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
+
 		usb_unanchor_urb(urb);
 
 		stats->tx_dropped++;
@@ -1854,7 +1875,7 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
 	struct kvaser_usb *dev = usb_get_intfdata(intf);
 	struct net_device *netdev;
 	struct kvaser_usb_net_priv *priv;
-	int i, err;
+	int err;
 
 	err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
 	if (err)
@@ -1868,19 +1889,17 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
 
 	priv = netdev_priv(netdev);
 
+	init_usb_anchor(&priv->tx_submitted);
 	init_completion(&priv->start_comp);
 	init_completion(&priv->stop_comp);
 
-	init_usb_anchor(&priv->tx_submitted);
-	atomic_set(&priv->active_tx_urbs, 0);
-
-	for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++)
-		priv->tx_contexts[i].echo_index = MAX_TX_URBS;
-
 	priv->dev = dev;
 	priv->netdev = netdev;
 	priv->channel = channel;
 
+	spin_lock_init(&priv->tx_contexts_lock);
+	kvaser_usb_reset_tx_urb_contexts(priv);
+
 	priv->can.state = CAN_STATE_STOPPED;
 	priv->can.clock.freq = CAN_USB_CLOCK;
 	priv->can.bittiming_const = &kvaser_usb_bittiming_const;
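The kvaser_usb hunks above replace a racy atomic counter with a context table guarded by a spinlock: a TX slot is claimed, and the queue stopped when the last slot goes, inside one critical section of start_xmit; the completion path frees the slot and wakes the queue under the same lock. A userspace model of that scheme (a pthread spinlock and plain flags stand in for the URB machinery; compile with -lpthread):

  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>

  #define MAX_TX_URBS 16
  #define SLOT_FREE   MAX_TX_URBS	/* echo_index sentinel, as in the driver */

  static pthread_spinlock_t lock;
  static int echo_index[MAX_TX_URBS];
  static int active;
  static bool queue_stopped;

  /* start_xmit side: claim a slot, stop the queue when the last one goes. */
  static int claim_slot(void)
  {
  	int i, slot = -1;

  	pthread_spin_lock(&lock);
  	for (i = 0; i < MAX_TX_URBS; i++) {
  		if (echo_index[i] == SLOT_FREE) {
  			echo_index[i] = i;
  			slot = i;
  			if (++active >= MAX_TX_URBS)
  				queue_stopped = true;
  			break;
  		}
  	}
  	pthread_spin_unlock(&lock);
  	return slot;
  }

  /* completion side: free the slot and wake the queue under the same lock. */
  static void release_slot(int slot)
  {
  	pthread_spin_lock(&lock);
  	echo_index[slot] = SLOT_FREE;
  	--active;
  	queue_stopped = false;
  	pthread_spin_unlock(&lock);
  }

  int main(void)
  {
  	int i;

  	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
  	for (i = 0; i < MAX_TX_URBS; i++)
  		echo_index[i] = SLOT_FREE;

  	for (i = 0; i < MAX_TX_URBS; i++)
  		claim_slot();
  	printf("stopped=%d\n", queue_stopped);	/* stopped=1 */
  	release_slot(0);
  	printf("stopped=%d\n", queue_stopped);	/* stopped=0 */
  	return 0;
  }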
@@ -12769,7 +12769,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
 		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
 		NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
 		NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
-	if (!CHIP_IS_E1x(bp)) {
+	if (!chip_is_e1x) {
 		dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
 				    NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
 		dev->hw_enc_features =
@@ -1120,7 +1120,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
 		}
 
 		/* Installed successfully, update the cached header too. */
-		memcpy(card_fw, fs_fw, sizeof(*card_fw));
+		*card_fw = *fs_fw;
 		card_fw_usable = 1;
 		*reset = 0;	/* already reset as part of load_fw */
 	}
@@ -589,7 +589,7 @@ static void tulip_tx_timeout(struct net_device *dev)
 			       (unsigned int)tp->rx_ring[i].buffer1,
 			       (unsigned int)tp->rx_ring[i].buffer2,
 			       buf[0], buf[1], buf[2]);
-			for (j = 0; buf[j] != 0xee && j < 1600; j++)
+			for (j = 0; ((j < 1600) && buf[j] != 0xee); j++)
 				if (j < 100)
 					pr_cont(" %02x", buf[j]);
 			pr_cont(" j=%d\n", j);
@@ -1189,13 +1189,12 @@ static void
 fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 {
 	struct	fec_enet_private *fep;
-	struct bufdesc *bdp, *bdp_t;
+	struct bufdesc *bdp;
 	unsigned short status;
 	struct	sk_buff	*skb;
 	struct fec_enet_priv_tx_q *txq;
 	struct netdev_queue *nq;
 	int	index = 0;
-	int	i, bdnum;
 	int	entries_free;
 
 	fep = netdev_priv(ndev);
@@ -1216,29 +1215,18 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 		if (bdp == txq->cur_tx)
 			break;
 
-		bdp_t = bdp;
-		bdnum = 1;
-		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
-		skb = txq->tx_skbuff[index];
-		while (!skb) {
-			bdp_t = fec_enet_get_nextdesc(bdp_t, fep, queue_id);
-			index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
-			skb = txq->tx_skbuff[index];
-			bdnum++;
-		}
-		if (skb_shinfo(skb)->nr_frags &&
-		    (status = bdp_t->cbd_sc) & BD_ENET_TX_READY)
-			break;
+		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
 
-		for (i = 0; i < bdnum; i++) {
-			if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
-				dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
-						 bdp->cbd_datlen, DMA_TO_DEVICE);
-			bdp->cbd_bufaddr = 0;
-			if (i < bdnum - 1)
-				bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
-		}
 		skb = txq->tx_skbuff[index];
 		txq->tx_skbuff[index] = NULL;
+		if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
+			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+					 bdp->cbd_datlen, DMA_TO_DEVICE);
+		bdp->cbd_bufaddr = 0;
+		if (!skb) {
+			bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
+			continue;
+		}
 
 		/* Check for errors. */
 		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1479,8 +1467,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 
 			vlan_packet_rcvd = true;
 
-			skb_copy_to_linear_data_offset(skb, VLAN_HLEN,
-						       data, (2 * ETH_ALEN));
+			memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
 			skb_pull(skb, VLAN_HLEN);
 		}
 
@@ -1136,6 +1136,8 @@ restart_poll:
 		ibmveth_replenish_task(adapter);
 
 	if (frames_processed < budget) {
+		napi_complete(napi);
+
 		/* We think we are done - reenable interrupts,
 		 * then check once more to make sure we are done.
 		 */
@@ -1144,8 +1146,6 @@ restart_poll:
 
 		BUG_ON(lpar_rc != H_SUCCESS);
 
-		napi_complete(napi);
-
 		if (ibmveth_rxq_pending_buffer(adapter) &&
 		    napi_reschedule(napi)) {
 			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
@@ -1698,8 +1698,6 @@ int mlx4_en_start_port(struct net_device *dev)
 	/* Schedule multicast task to populate multicast list */
 	queue_work(mdev->workqueue, &priv->rx_mode_task);
 
-	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
-
 #ifdef CONFIG_MLX4_EN_VXLAN
 	if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
 		vxlan_get_rx_port(dev);
@@ -2853,6 +2851,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	queue_delayed_work(mdev->workqueue, &priv->service_task,
 			   SERVICE_TASK_DELAY);
 
+	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
+
 	return 0;
 
 out:
@@ -453,7 +453,7 @@ struct mlx4_en_port_stats {
 	unsigned long rx_chksum_none;
 	unsigned long rx_chksum_complete;
 	unsigned long tx_chksum_offload;
-#define NUM_PORT_STATS		9
+#define NUM_PORT_STATS		10
 };
 
 struct mlx4_en_perf_stats {
@@ -2248,10 +2248,9 @@ static int smc_drv_probe(struct platform_device *pdev)
 	const struct of_device_id *match = NULL;
 	struct smc_local *lp;
 	struct net_device *ndev;
-	struct resource *res;
+	struct resource *res, *ires;
 	unsigned int __iomem *addr;
 	unsigned long irq_flags = SMC_IRQ_FLAGS;
-	unsigned long irq_resflags;
 	int ret;
 
 	ndev = alloc_etherdev(sizeof(struct smc_local));
@@ -2343,19 +2342,16 @@ static int smc_drv_probe(struct platform_device *pdev)
 		goto out_free_netdev;
 	}
 
-	ndev->irq = platform_get_irq(pdev, 0);
-	if (ndev->irq <= 0) {
+	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!ires) {
 		ret = -ENODEV;
 		goto out_release_io;
 	}
 	/*
 	 * If this platform does not specify any special irqflags, or if
 	 * the resource supplies a trigger, override the irqflags with
 	 * the trigger flags from the resource.
 	 */
-	irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
-	if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
-		irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
+	ndev->irq = ires->start;
+
+	if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
+		irq_flags = ires->flags & IRQF_TRIGGER_MASK;
 
 	ret = smc_request_attrib(pdev, ndev);
 	if (ret)
@@ -498,9 +498,9 @@ static int w5100_napi_poll(struct napi_struct *napi, int budget)
 	}
 
 	if (rx_count < budget) {
+		napi_complete(napi);
 		w5100_write(priv, W5100_IMR, IR_S0);
 		mmiowb();
-		napi_complete(napi);
 	}
 
 	return rx_count;
@@ -418,9 +418,9 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget)
 	}
 
 	if (rx_count < budget) {
+		napi_complete(napi);
 		w5300_write(priv, W5300_IMR, IR_S0);
 		mmiowb();
-		napi_complete(napi);
 	}
 
 	return rx_count;
@@ -300,9 +300,18 @@ static const struct driver_info cx82310_info = {
 	.tx_fixup	= cx82310_tx_fixup,
 };
 
+#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \
+	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
+		       USB_DEVICE_ID_MATCH_DEV_INFO, \
+	.idVendor = (vend), \
+	.idProduct = (prod), \
+	.bDeviceClass = (cl), \
+	.bDeviceSubClass = (sc), \
+	.bDeviceProtocol = (pr)
+
 static const struct usb_device_id products[] = {
 	{
-		USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0),
+		USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0),
 		.driver_info = (unsigned long) &cx82310_info
 	},
 	{ },
@@ -1448,8 +1448,10 @@ static void virtnet_free_queues(struct virtnet_info *vi)
 {
 	int i;
 
-	for (i = 0; i < vi->max_queue_pairs; i++)
+	for (i = 0; i < vi->max_queue_pairs; i++) {
+		napi_hash_del(&vi->rq[i].napi);
 		netif_napi_del(&vi->rq[i].napi);
+	}
 
 	kfree(vi->rq);
 	kfree(vi->sq);
@@ -1948,11 +1950,8 @@ static int virtnet_freeze(struct virtio_device *vdev)
 	cancel_delayed_work_sync(&vi->refill);
 
 	if (netif_running(vi->dev)) {
-		for (i = 0; i < vi->max_queue_pairs; i++) {
+		for (i = 0; i < vi->max_queue_pairs; i++)
 			napi_disable(&vi->rq[i].napi);
-			napi_hash_del(&vi->rq[i].napi);
-			netif_napi_del(&vi->rq[i].napi);
-		}
 	}
 
 	remove_vq_common(vi);
@@ -1218,7 +1218,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 			goto drop;
 
 		flags &= ~VXLAN_HF_RCO;
-		vni &= VXLAN_VID_MASK;
+		vni &= VXLAN_VNI_MASK;
 	}
 
 	/* For backwards compatibility, only allow reserved fields to be
@@ -1239,7 +1239,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 		flags &= ~VXLAN_GBP_USED_BITS;
 	}
 
-	if (flags || (vni & ~VXLAN_VID_MASK)) {
+	if (flags || vni & ~VXLAN_VNI_MASK) {
 		/* If there are any unprocessed flags remaining treat
 		 * this as a malformed packet. This behavior diverges from
 		 * VXLAN RFC (RFC7348) which stipulates that bits in reserved
@@ -5370,6 +5370,7 @@ static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy,
 	case 0x432a: /* BCM4321 */
 	case 0x432d: /* BCM4322 */
 	case 0x4352: /* BCM43222 */
+	case 0x435a: /* BCM43228 */
 	case 0x4333: /* BCM4331 */
 	case 0x43a2: /* BCM4360 */
 	case 0x43b3: /* BCM4352 */
@@ -39,13 +39,22 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
 	void *dcmd_buf = NULL, *wr_pointer;
 	u16 msglen, maxmsglen = PAGE_SIZE - 0x100;
 
-	brcmf_dbg(TRACE, "cmd %x set %d len %d\n", cmdhdr->cmd, cmdhdr->set,
-		  cmdhdr->len);
+	if (len < sizeof(*cmdhdr)) {
+		brcmf_err("vendor command too short: %d\n", len);
+		return -EINVAL;
+	}
 
 	vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
 	ifp = vif->ifp;
 
-	len -= sizeof(struct brcmf_vndr_dcmd_hdr);
+	brcmf_dbg(TRACE, "ifidx=%d, cmd=%d\n", ifp->ifidx, cmdhdr->cmd);
+
+	if (cmdhdr->offset > len) {
+		brcmf_err("bad buffer offset %d > %d\n", cmdhdr->offset, len);
+		return -EINVAL;
+	}
+
+	len -= cmdhdr->offset;
 	ret_len = cmdhdr->len;
 	if (ret_len > 0 || len > 0) {
 		if (len > BRCMF_DCMD_MAXLEN) {
@@ -95,7 +95,8 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
 	.nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION,	\
 	.base_params = &iwl1000_base_params,		\
 	.eeprom_params = &iwl1000_eeprom_params,	\
-	.led_mode = IWL_LED_BLINK
+	.led_mode = IWL_LED_BLINK,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl1000_bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
@@ -121,7 +122,8 @@ const struct iwl_cfg iwl1000_bg_cfg = {
 	.base_params = &iwl1000_base_params,		\
 	.eeprom_params = &iwl1000_eeprom_params,	\
 	.led_mode = IWL_LED_RF_STATE,			\
-	.rx_with_siso_diversity = true
+	.rx_with_siso_diversity = true,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl100_bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
@@ -123,7 +123,9 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
 	.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION,	\
 	.base_params = &iwl2000_base_params,		\
 	.eeprom_params = &iwl20x0_eeprom_params,	\
-	.led_mode = IWL_LED_RF_STATE
+	.led_mode = IWL_LED_RF_STATE,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+
 
 const struct iwl_cfg iwl2000_2bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
@@ -149,7 +151,8 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
 	.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION,	\
 	.base_params = &iwl2030_base_params,		\
 	.eeprom_params = &iwl20x0_eeprom_params,	\
-	.led_mode = IWL_LED_RF_STATE
+	.led_mode = IWL_LED_RF_STATE,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl2030_2bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
@@ -170,7 +173,8 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
 	.base_params = &iwl2000_base_params,		\
 	.eeprom_params = &iwl20x0_eeprom_params,	\
 	.led_mode = IWL_LED_RF_STATE,			\
-	.rx_with_siso_diversity = true
+	.rx_with_siso_diversity = true,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl105_bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
@@ -197,7 +201,8 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
 	.base_params = &iwl2030_base_params,		\
 	.eeprom_params = &iwl20x0_eeprom_params,	\
 	.led_mode = IWL_LED_RF_STATE,			\
-	.rx_with_siso_diversity = true
+	.rx_with_siso_diversity = true,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl135_bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
@@ -93,7 +93,8 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
 	.nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION,	\
 	.base_params = &iwl5000_base_params,		\
 	.eeprom_params = &iwl5000_eeprom_params,	\
-	.led_mode = IWL_LED_BLINK
+	.led_mode = IWL_LED_BLINK,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl5300_agn_cfg = {
 	.name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
@@ -158,7 +159,8 @@ const struct iwl_cfg iwl5350_agn_cfg = {
 	.base_params = &iwl5000_base_params,		\
 	.eeprom_params = &iwl5000_eeprom_params,	\
 	.led_mode = IWL_LED_BLINK,			\
-	.internal_wimax_coex = true
+	.internal_wimax_coex = true,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl5150_agn_cfg = {
 	.name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
@@ -145,7 +145,8 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
 	.nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION,	\
 	.base_params = &iwl6000_g2_base_params,		\
 	.eeprom_params = &iwl6000_eeprom_params,	\
-	.led_mode = IWL_LED_RF_STATE
+	.led_mode = IWL_LED_RF_STATE,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl6005_2agn_cfg = {
 	.name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
@@ -199,7 +200,8 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
 	.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION,	\
 	.base_params = &iwl6000_g2_base_params,		\
 	.eeprom_params = &iwl6000_eeprom_params,	\
-	.led_mode = IWL_LED_RF_STATE
+	.led_mode = IWL_LED_RF_STATE,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl6030_2agn_cfg = {
 	.name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
@@ -235,7 +237,8 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
 	.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION,	\
 	.base_params = &iwl6000_g2_base_params,		\
 	.eeprom_params = &iwl6000_eeprom_params,	\
-	.led_mode = IWL_LED_RF_STATE
+	.led_mode = IWL_LED_RF_STATE,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl6035_2agn_cfg = {
 	.name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
@@ -290,7 +293,8 @@ const struct iwl_cfg iwl130_bg_cfg = {
 	.nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION,	\
 	.base_params = &iwl6000_base_params,		\
 	.eeprom_params = &iwl6000_eeprom_params,	\
-	.led_mode = IWL_LED_BLINK
+	.led_mode = IWL_LED_BLINK,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl6000i_2agn_cfg = {
 	.name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
@@ -322,7 +326,8 @@ const struct iwl_cfg iwl6000i_2bg_cfg = {
 	.base_params = &iwl6050_base_params,		\
 	.eeprom_params = &iwl6000_eeprom_params,	\
 	.led_mode = IWL_LED_BLINK,			\
-	.internal_wimax_coex = true
+	.internal_wimax_coex = true,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl6050_2agn_cfg = {
 	.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
@@ -347,7 +352,8 @@ const struct iwl_cfg iwl6050_2abg_cfg = {
 	.base_params = &iwl6050_base_params,		\
 	.eeprom_params = &iwl6000_eeprom_params,	\
 	.led_mode = IWL_LED_BLINK,			\
-	.internal_wimax_coex = true
+	.internal_wimax_coex = true,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl6150_bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
@@ -793,7 +793,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
 	if (!vif->bss_conf.assoc)
 		smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
-	if (IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status,
+	if (mvmvif->phy_ctxt &&
+	    IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status,
 			       mvmvif->phy_ctxt->id))
 		smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
@@ -832,7 +832,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
 	if (!vif->bss_conf.assoc)
 		smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
-	if (data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
+	if (mvmvif->phy_ctxt &&
+	    data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
 		smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
 	IWL_DEBUG_COEX(data->mvm,
@@ -405,7 +405,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
 			&mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
 
-		if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER)
+		if ((mvm->fw->ucode_capa.capa[0] &
+		     IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+		    (mvm->fw->ucode_capa.api[0] &
+		     IWL_UCODE_TLV_API_LQ_SS_PARAMS))
 			hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
 				IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
 	}
@@ -2215,7 +2218,19 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
 
 	mutex_lock(&mvm->mutex);
 
-	iwl_mvm_cancel_scan(mvm);
+	/* Due to a race condition, it's possible that mac80211 asks
+	 * us to stop a hw_scan when it's already stopped. This can
+	 * happen, for instance, if we stopped the scan ourselves,
+	 * called ieee80211_scan_completed() and the userspace called
+	 * cancel scan before ieee80211_scan_work() could run.
+	 * To handle that, simply return if the scan is not running.
+	 */
+	/* FIXME: for now, we ignore this race for UMAC scans, since
+	 * they don't set the scan_status.
+	 */
+	if ((mvm->scan_status == IWL_MVM_SCAN_OS) ||
+	    (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+		iwl_mvm_cancel_scan(mvm);
 
 	mutex_unlock(&mvm->mutex);
 }
@@ -2559,12 +2574,29 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
 	int ret;
 
 	mutex_lock(&mvm->mutex);
+
+	/* Due to a race condition, it's possible that mac80211 asks
+	 * us to stop a sched_scan when it's already stopped. This
+	 * can happen, for instance, if we stopped the scan ourselves,
+	 * called ieee80211_sched_scan_stopped() and the userspace called
+	 * stop sched scan before ieee80211_sched_scan_stopped_work()
+	 * could run. To handle this, simply return if the scan is
+	 * not running.
+	 */
+	/* FIXME: for now, we ignore this race for UMAC scans, since
+	 * they don't set the scan_status.
+	 */
+	if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
+	    !(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+		mutex_unlock(&mvm->mutex);
+		return 0;
+	}
+
 	ret = iwl_mvm_scan_offload_stop(mvm, false);
 	mutex_unlock(&mvm->mutex);
 	iwl_mvm_wait_for_async_handlers(mvm);
 
 	return ret;
 }
 
 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
@@ -134,9 +134,12 @@ enum rs_column_mode {
 #define MAX_NEXT_COLUMNS 7
 #define MAX_COLUMN_CHECKS 3
 
+struct rs_tx_column;
+
 typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
 				     struct ieee80211_sta *sta,
-				     struct iwl_scale_tbl_info *tbl);
+				     struct iwl_scale_tbl_info *tbl,
+				     const struct rs_tx_column *next_col);
 
 struct rs_tx_column {
 	enum rs_column_mode mode;
@@ -147,13 +150,15 @@ struct rs_tx_column {
 };
 
 static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			 struct iwl_scale_tbl_info *tbl)
+			 struct iwl_scale_tbl_info *tbl,
+			 const struct rs_tx_column *next_col)
 {
-	return iwl_mvm_bt_coex_is_ant_avail(mvm, tbl->rate.ant);
+	return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant);
 }
 
 static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			  struct iwl_scale_tbl_info *tbl)
+			  struct iwl_scale_tbl_info *tbl,
+			  const struct rs_tx_column *next_col)
 {
 	if (!sta->ht_cap.ht_supported)
 		return false;
@@ -171,7 +176,8 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 }
 
 static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			  struct iwl_scale_tbl_info *tbl)
+			  struct iwl_scale_tbl_info *tbl,
+			  const struct rs_tx_column *next_col)
 {
 	if (!sta->ht_cap.ht_supported)
 		return false;
@@ -180,7 +186,8 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 }
 
 static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			 struct iwl_scale_tbl_info *tbl)
+			 struct iwl_scale_tbl_info *tbl,
+			 const struct rs_tx_column *next_col)
 {
 	struct rs_rate *rate = &tbl->rate;
 	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
@@ -1590,7 +1597,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
 
 	for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
 		allow_func = next_col->checks[j];
-		if (allow_func && !allow_func(mvm, sta, tbl))
+		if (allow_func && !allow_func(mvm, sta, tbl, next_col))
 			break;
 	}
 
@@ -1128,8 +1128,10 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
 	if (mvm->scan_status == IWL_MVM_SCAN_NONE)
 		return 0;
 
-	if (iwl_mvm_is_radio_killed(mvm))
+	if (iwl_mvm_is_radio_killed(mvm)) {
+		ret = 0;
 		goto out;
+	}
 
 	if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
 	    (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
@@ -1148,16 +1150,14 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
 		IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n",
 			       sched ? "offloaded " : "", ret);
 		iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
-		return ret;
+		goto out;
 	}
 
 	IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n",
 		       sched ? "offloaded " : "");
 
 	ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
-	if (ret)
-		return ret;
 
+out:
 	/*
 	 * Clear the scan status so the next scan requests will succeed. This
 	 * also ensures the Rx handler doesn't do anything, as the scan was
@@ -1167,7 +1167,6 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
 	if (mvm->scan_status == IWL_MVM_SCAN_OS)
 		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
 
-out:
 	mvm->scan_status = IWL_MVM_SCAN_NONE;
 
 	if (notify) {
@@ -1177,7 +1176,7 @@ out:
 		ieee80211_scan_completed(mvm->hw, true);
 	}
 
-	return 0;
+	return ret;
 }
 
 static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm,
@@ -750,8 +750,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
 	 * request
 	 */
 	list_for_each_entry(te_data, &mvm->time_event_list, list) {
-		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE &&
-		    te_data->running) {
+		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
 			mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
 			is_p2p = true;
 			goto remove_te;
@@ -766,10 +765,8 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
 	 * request
 	 */
 	list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
-		if (te_data->running) {
-			mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
-			goto remove_te;
-		}
+		mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
+		goto remove_te;
 	}
 
 remove_te:
@@ -1386,8 +1386,11 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
 		}
 
 		return true;
-	} else if (0x86DD == ether_type) {
-		return true;
+	} else if (ETH_P_IPV6 == ether_type) {
+		/* TODO: Handle any IPv6 cases that need special handling.
+		 * For now, always return false
+		 */
+		goto end;
 	}
 
 end:
@@ -96,6 +96,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 static void make_tx_response(struct xenvif_queue *queue,
 			     struct xen_netif_tx_request *txp,
 			     s8       st);
+static void push_tx_responses(struct xenvif_queue *queue);
 
 static inline int tx_work_todo(struct xenvif_queue *queue);
 
@@ -655,15 +656,10 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
 	unsigned long flags;
 
 	do {
-		int notify;
-
 		spin_lock_irqsave(&queue->response_lock, flags);
 		make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
-		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+		push_tx_responses(queue);
 		spin_unlock_irqrestore(&queue->response_lock, flags);
-		if (notify)
-			notify_remote_via_irq(queue->tx_irq);
-
 		if (cons == end)
 			break;
 		txp = RING_GET_REQUEST(&queue->tx, cons++);
@@ -1657,7 +1653,6 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 {
 	struct pending_tx_info *pending_tx_info;
 	pending_ring_idx_t index;
-	int notify;
 	unsigned long flags;
 
 	pending_tx_info = &queue->pending_tx_info[pending_idx];
@@ -1673,12 +1668,9 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 	index = pending_index(queue->pending_prod++);
 	queue->pending_ring[index] = pending_idx;
 
-	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+	push_tx_responses(queue);
 
 	spin_unlock_irqrestore(&queue->response_lock, flags);
-
-	if (notify)
-		notify_remote_via_irq(queue->tx_irq);
 }
 
 
@@ -1699,6 +1691,15 @@ static void make_tx_response(struct xenvif_queue *queue,
 	queue->tx.rsp_prod_pvt = ++i;
 }
 
+static void push_tx_responses(struct xenvif_queue *queue)
+{
+	int notify;
+
+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+	if (notify)
+		notify_remote_via_irq(queue->tx_irq);
+}
+
 static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
 						      u16      id,
 						      s8       st,
@@ -965,9 +965,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  *	Used to add FDB entries to dump requests. Implementers should add
  *	entries to skb and update idx with the number of entries.
  *
- * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
+ * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
+ *			     u16 flags)
  * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
  *			     struct net_device *dev, u32 filter_mask)
+ * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
+ *			     u16 flags);
  *
  * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
  *	Called to change device carrier. Soft-devices (like dummy, team, etc)
@@ -948,6 +948,13 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
 	to->l4_hash = from->l4_hash;
 };
 
+static inline void skb_sender_cpu_clear(struct sk_buff *skb)
+{
+#ifdef CONFIG_XPS
+	skb->sender_cpu = 0;
+#endif
+}
+
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 {
@@ -481,6 +481,7 @@ void dst_init(void);
 enum {
 	XFRM_LOOKUP_ICMP = 1 << 0,
 	XFRM_LOOKUP_QUEUE = 1 << 1,
+	XFRM_LOOKUP_KEEP_DST_REF = 1 << 2,
 };
 
 struct flowi;
@@ -91,6 +91,7 @@ struct vxlanhdr {
 
 #define VXLAN_N_VID	(1u << 24)
 #define VXLAN_VID_MASK	(VXLAN_N_VID - 1)
+#define VXLAN_VNI_MASK	(VXLAN_VID_MASK << 8)
 #define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
 
 struct vxlan_metadata {
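For reference on the vxlan.c hunks further up: the 24-bit VNI sits in the top three bytes of the 32-bit vxlanhdr field, so VXLAN_VID_MASK only applies after the field has been shifted right by 8; on the raw field the correct mask is the new VXLAN_VNI_MASK. A quick standalone check of the two masks:

  #include <stdint.h>
  #include <stdio.h>

  #define VXLAN_N_VID    (1u << 24)
  #define VXLAN_VID_MASK (VXLAN_N_VID - 1)	/* 0x00ffffff: VNI after >> 8 */
  #define VXLAN_VNI_MASK (VXLAN_VID_MASK << 8)	/* 0xffffff00: VNI in the raw field */

  int main(void)
  {
  	uint32_t field = 0x123456u << 8;	/* VNI 0x123456 as carried on the wire */

  	printf("0x%08x\n", field & VXLAN_VNI_MASK);	/* 0x12345600: VNI kept */
  	printf("0x%08x\n", field & VXLAN_VID_MASK);	/* 0x00345600: top byte lost */
  	return 0;
  }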
@@ -563,6 +563,8 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
 	 */
 	del_nbp(p);
 
+	dev_set_mtu(br->dev, br_min_mtu(br));
+
 	spin_lock_bh(&br->lock);
 	changed_addr = br_stp_recalculate_bridge_id(br);
 	spin_unlock_bh(&br->lock);
@@ -281,7 +281,7 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
 	int copylen;
 
 	ret = -EOPNOTSUPP;
-	if (m->msg_flags&MSG_OOB)
+	if (flags & MSG_OOB)
 		goto read_error;
 
 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
@@ -2166,28 +2166,28 @@ replay:
 			}
 		}
 		err = rtnl_configure_link(dev, ifm);
-		if (err < 0) {
-			if (ops->newlink) {
-				LIST_HEAD(list_kill);
-
-				ops->dellink(dev, &list_kill);
-				unregister_netdevice_many(&list_kill);
-			} else {
-				unregister_netdevice(dev);
-			}
-			goto out;
-		}
-
+		if (err < 0)
+			goto out_unregister;
 		if (link_net) {
 			err = dev_change_net_namespace(dev, dest_net, ifname);
 			if (err < 0)
-				unregister_netdevice(dev);
+				goto out_unregister;
 		}
 out:
 		if (link_net)
 			put_net(link_net);
 		put_net(dest_net);
 		return err;
+out_unregister:
+		if (ops->newlink) {
+			LIST_HEAD(list_kill);
+
+			ops->dellink(dev, &list_kill);
+			unregister_netdevice_many(&list_kill);
+		} else {
+			unregister_netdevice(dev);
+		}
+		goto out;
 	}
 }
@@ -3733,9 +3733,13 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
 		     struct sock *sk, int tstype)
 {
 	struct sk_buff *skb;
-	bool tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
+	bool tsonly;
 
-	if (!sk || !skb_may_tx_timestamp(sk, tsonly))
+	if (!sk)
+		return;
+
+	tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
+	if (!skb_may_tx_timestamp(sk, tsonly))
 		return;
 
 	if (tsonly)
@@ -4173,7 +4177,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
 	skb->ignore_df = 0;
 	skb_dst_drop(skb);
 	skb->mark = 0;
-	skb->sender_cpu = 0;
+	skb_sender_cpu_clear(skb);
 	skb_init_secmark(skb);
 	secpath_reset(skb);
 	nf_reset(skb);
@@ -1655,6 +1655,10 @@ void sock_rfree(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(sock_rfree);
 
+/*
+ * Buffer destructor for skbs that are not used directly in read or write
+ * path, e.g. for error handler skbs. Automatically called from kfree_skb.
+ */
 void sock_efree(struct sk_buff *skb)
 {
 	sock_put(skb->sk);
@@ -25,6 +25,8 @@
 static int zero = 0;
 static int one = 1;
 static int ushort_max = USHRT_MAX;
+static int min_sndbuf = SOCK_MIN_SNDBUF;
+static int min_rcvbuf = SOCK_MIN_RCVBUF;
 
 static int net_msg_warn;	/* Unused, but still a sysctl */
 
@@ -237,7 +239,7 @@ static struct ctl_table net_core_table[] = {
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &one,
+		.extra1		= &min_sndbuf,
 	},
 	{
 		.procname	= "rmem_max",
@@ -245,7 +247,7 @@ static struct ctl_table net_core_table[] = {
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &one,
+		.extra1		= &min_rcvbuf,
 	},
 	{
 		.procname	= "wmem_default",
@@ -253,7 +255,7 @@ static struct ctl_table net_core_table[] = {
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &one,
+		.extra1		= &min_sndbuf,
 	},
 	{
 		.procname	= "rmem_default",
@@ -261,7 +263,7 @@ static struct ctl_table net_core_table[] = {
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &one,
+		.extra1		= &min_rcvbuf,
 	},
 	{
 		.procname	= "dev_weight",
@@ -268,6 +268,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
 		release_sock(sk);
 		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
 			timeo = schedule_timeout(timeo);
+		sched_annotate_sleep();
 		lock_sock(sk);
 		err = 0;
 		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
@@ -71,6 +71,20 @@ static inline void inet_diag_unlock_handler(
 	mutex_unlock(&inet_diag_table_mutex);
 }
 
+static size_t inet_sk_attr_size(void)
+{
+	return	  nla_total_size(sizeof(struct tcp_info))
+		+ nla_total_size(1) /* INET_DIAG_SHUTDOWN */
+		+ nla_total_size(1) /* INET_DIAG_TOS */
+		+ nla_total_size(1) /* INET_DIAG_TCLASS */
+		+ nla_total_size(sizeof(struct inet_diag_meminfo))
+		+ nla_total_size(sizeof(struct inet_diag_msg))
+		+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
+		+ nla_total_size(TCP_CA_NAME_MAX)
+		+ nla_total_size(sizeof(struct tcpvegas_info))
+		+ 64;
+}
+
 int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
 		      struct sk_buff *skb, struct inet_diag_req_v2 *req,
 		      struct user_namespace *user_ns,
@@ -326,9 +340,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
 	if (err)
 		goto out;
 
-	rep = nlmsg_new(sizeof(struct inet_diag_msg) +
-			sizeof(struct inet_diag_meminfo) +
-			sizeof(struct tcp_info) + 64, GFP_KERNEL);
+	rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL);
 	if (!rep) {
 		err = -ENOMEM;
 		goto out;
@@ -67,6 +67,7 @@ static int ip_forward_finish(struct sk_buff *skb)
 	if (unlikely(opt->optlen))
 		ip_forward_options(skb);
 
+	skb_sender_cpu_clear(skb);
 	return dst_output(skb);
 }
 
@@ -378,6 +378,12 @@ EXPORT_SYMBOL_GPL(tcp_slow_start);
 */
 void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
 {
+	/* If credits accumulated at a higher w, apply them gently now. */
+	if (tp->snd_cwnd_cnt >= w) {
+		tp->snd_cwnd_cnt = 0;
+		tp->snd_cwnd++;
+	}
+
 	tp->snd_cwnd_cnt += acked;
 	if (tp->snd_cwnd_cnt >= w) {
 		u32 delta = tp->snd_cwnd_cnt / w;
@@ -306,8 +306,10 @@ tcp_friendliness:
 		}
 	}
 
-	if (ca->cnt == 0)			/* cannot be zero */
-		ca->cnt = 1;
+	/* The maximum rate of cwnd increase CUBIC allows is 1 packet per
+	 * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT.
+	 */
+	ca->cnt = max(ca->cnt, 2U);
 }
 
 static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
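Worked numbers for the two TCP hunks above: tcp_cong_avoid_ai() now flushes a stale snd_cwnd_cnt as a single cwnd increment instead of letting snd_cwnd_cnt / w inflate cwnd in one burst, and CUBIC's max(ca->cnt, 2U) caps growth at one segment per two ACKs, i.e. roughly 1.5x cwnd per RTT. A standalone model of the fixed additive-increase step (the kernel's clamp to snd_cwnd_clamp is omitted):

  #include <stdint.h>
  #include <stdio.h>

  /* Mirrors the fixed tcp_cong_avoid_ai(): one segment per w ACKed. */
  static void cong_avoid_ai(uint32_t *cwnd, uint32_t *cnt, uint32_t w,
  			  uint32_t acked)
  {
  	/* Credits accumulated at a higher w are applied gently. */
  	if (*cnt >= w) {
  		*cnt = 0;
  		(*cwnd)++;
  	}

  	*cnt += acked;
  	if (*cnt >= w) {
  		uint32_t delta = *cnt / w;

  		*cnt -= delta * w;
  		*cwnd += delta;
  	}
  }

  int main(void)
  {
  	uint32_t cwnd = 10, cnt = 1000;	/* stale credit from an earlier, larger w */

  	cong_avoid_ai(&cwnd, &cnt, cwnd, 1);
  	/* One gentle step (cwnd=11, cnt=1), not the +100 burst 1001/10 would give. */
  	printf("cwnd=%u cnt=%u\n", cwnd, cnt);
  	return 0;
  }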
@@ -63,6 +63,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
 		return err;
 
 	IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
+	skb->protocol = htons(ETH_P_IP);
 
 	return x->outer_mode->output2(x, skb);
 }
@@ -71,7 +72,6 @@ EXPORT_SYMBOL(xfrm4_prepare_output);
 int xfrm4_output_finish(struct sk_buff *skb)
 {
 	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-	skb->protocol = htons(ETH_P_IP);
 
 #ifdef CONFIG_NETFILTER
 	IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
@@ -318,6 +318,7 @@ static int ip6_forward_proxy_check(struct sk_buff *skb)
 
 static inline int ip6_forward_finish(struct sk_buff *skb)
 {
+	skb_sender_cpu_clear(skb);
 	return dst_output(skb);
 }
 
@@ -314,7 +314,7 @@ out:
  * Create tunnel matching given parameters.
  *
  * Return:
- *   created tunnel or NULL
+ *   created tunnel or error pointer
 **/
 
 static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
@@ -322,7 +322,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
 	struct net_device *dev;
 	struct ip6_tnl *t;
 	char name[IFNAMSIZ];
-	int err;
+	int err = -ENOMEM;
 
 	if (p->name[0])
 		strlcpy(name, p->name, IFNAMSIZ);
@@ -348,7 +348,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
 failed_free:
 	ip6_dev_free(dev);
 failed:
-	return NULL;
+	return ERR_PTR(err);
 }
 
 /**
@@ -362,7 +362,7 @@ failed:
  * tunnel device is created and registered for use.
 *
 * Return:
- *   matching tunnel or NULL
+ *   matching tunnel or error pointer
 **/
 
 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
@@ -380,13 +380,13 @@ static struct ip6_tnl *ip6_tnl_locate(struct net *net,
 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
 		    ipv6_addr_equal(remote, &t->parms.raddr)) {
 			if (create)
-				return NULL;
+				return ERR_PTR(-EEXIST);
 
 			return t;
 		}
 	}
 	if (!create)
-		return NULL;
+		return ERR_PTR(-ENODEV);
 	return ip6_tnl_create(net, p);
 }
 
@@ -1420,7 +1420,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 			}
 			ip6_tnl_parm_from_user(&p1, &p);
 			t = ip6_tnl_locate(net, &p1, 0);
-			if (t == NULL)
+			if (IS_ERR(t))
 				t = netdev_priv(dev);
 		} else {
 			memset(&p, 0, sizeof(p));
@@ -1445,7 +1445,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		ip6_tnl_parm_from_user(&p1, &p);
 		t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
 		if (cmd == SIOCCHGTUNNEL) {
-			if (t != NULL) {
+			if (!IS_ERR(t)) {
 				if (t->dev != dev) {
 					err = -EEXIST;
 					break;
@@ -1457,14 +1457,15 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 			else
 				err = ip6_tnl_update(t, &p1);
 		}
-		if (t) {
+		if (!IS_ERR(t)) {
 			err = 0;
 			ip6_tnl_parm_to_user(&p, &t->parms);
 			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
 				err = -EFAULT;
 
-		} else
-			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
+		} else {
+			err = PTR_ERR(t);
+		}
 		break;
 	case SIOCDELTUNNEL:
 		err = -EPERM;
@@ -1478,7 +1479,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 			err = -ENOENT;
 			ip6_tnl_parm_from_user(&p1, &p);
 			t = ip6_tnl_locate(net, &p1, 0);
-			if (t == NULL)
+			if (IS_ERR(t))
 				break;
 			err = -EPERM;
 			if (t->dev == ip6n->fb_tnl_dev)
@@ -1672,12 +1673,13 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
 			   struct nlattr *tb[], struct nlattr *data[])
 {
 	struct net *net = dev_net(dev);
-	struct ip6_tnl *nt;
+	struct ip6_tnl *nt, *t;
 
 	nt = netdev_priv(dev);
 	ip6_tnl_netlink_parms(data, &nt->parms);
 
-	if (ip6_tnl_locate(net, &nt->parms, 0))
+	t = ip6_tnl_locate(net, &nt->parms, 0);
+	if (!IS_ERR(t))
 		return -EEXIST;
 
 	return ip6_tnl_create2(dev);
@@ -1697,8 +1699,7 @@ static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
 	ip6_tnl_netlink_parms(data, &p);
 
 	t = ip6_tnl_locate(net, &p, 0);
-
-	if (t) {
+	if (!IS_ERR(t)) {
 		if (t->dev != dev)
 			return -EEXIST;
 	} else
@@ -114,6 +114,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
 		return err;
 
 	skb->ignore_df = 1;
+	skb->protocol = htons(ETH_P_IPV6);
 
 	return x->outer_mode->output2(x, skb);
 }
@@ -122,7 +123,6 @@ EXPORT_SYMBOL(xfrm6_prepare_output);
 int xfrm6_output_finish(struct sk_buff *skb)
 {
 	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
-	skb->protocol = htons(ETH_P_IPV6);
 
 #ifdef CONFIG_NETFILTER
 	IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
@@ -200,6 +200,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
 
 #if IS_ENABLED(CONFIG_IPV6_MIP6)
 		case IPPROTO_MH:
+			offset += ipv6_optlen(exthdr);
 			if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
 				struct ip6_mh *mh;
 
@@ -58,13 +58,24 @@ struct ieee80211_local;
 #define IEEE80211_UNSET_POWER_LEVEL	INT_MIN
 
 /*
- * Some APs experience problems when working with U-APSD. Decrease the
- * probability of that happening by using legacy mode for all ACs but VO.
- * The AP that caused us trouble was a Cisco 4410N. It ignores our
- * setting, and always treats non-VO ACs as legacy.
+ * Some APs experience problems when working with U-APSD. Decreasing the
+ * probability of that happening by using legacy mode for all ACs but VO isn't
+ * enough.
+ *
+ * Cisco 4410N originally forced us to enable VO by default only because it
+ * treated non-VO ACs as legacy.
+ *
+ * However some APs (notably Netgear R7000) silently reclassify packets to
+ * different ACs. Since u-APSD ACs require trigger frames for frame retrieval
+ * clients would never see some frames (e.g. ARP responses) or would fetch them
+ * accidentally after a long time.
+ *
+ * It makes little sense to enable u-APSD queues by default because it needs
+ * userspace applications to be aware of it to actually take advantage of the
+ * possible additional powersavings. Implicitly depending on driver autotrigger
+ * frame support doesn't make much sense.
 */
-#define IEEE80211_DEFAULT_UAPSD_QUEUES \
-	IEEE80211_WMM_IE_STA_QOSINFO_AC_VO
+#define IEEE80211_DEFAULT_UAPSD_QUEUES 0
 
 #define IEEE80211_DEFAULT_MAX_SP_LEN		\
 	IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
@@ -453,6 +464,7 @@ struct ieee80211_if_managed {
 	unsigned int flags;
 
 	bool csa_waiting_bcn;
+	bool csa_ignored_same_chan;
 
 	bool beacon_crc_valid;
 	u32 beacon_crc;
@@ -1150,6 +1150,17 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
 		return;
 	}
 
+	if (cfg80211_chandef_identical(&csa_ie.chandef,
+				       &sdata->vif.bss_conf.chandef)) {
+		if (ifmgd->csa_ignored_same_chan)
+			return;
+		sdata_info(sdata,
+			   "AP %pM tries to chanswitch to same channel, ignore\n",
+			   ifmgd->associated->bssid);
+		ifmgd->csa_ignored_same_chan = true;
+		return;
+	}
+
 	mutex_lock(&local->mtx);
 	mutex_lock(&local->chanctx_mtx);
 	conf = rcu_dereference_protected(sdata->vif.chanctx_conf,

@@ -1210,6 +1221,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
 	sdata->vif.csa_active = true;
 	sdata->csa_chandef = csa_ie.chandef;
 	sdata->csa_block_tx = csa_ie.mode;
+	ifmgd->csa_ignored_same_chan = false;
 
 	if (sdata->csa_block_tx)
 		ieee80211_stop_vif_queues(local, sdata,

@@ -2090,6 +2102,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
 
 	sdata->vif.csa_active = false;
 	ifmgd->csa_waiting_bcn = false;
+	ifmgd->csa_ignored_same_chan = false;
 	if (sdata->csa_block_tx) {
 		ieee80211_wake_vif_queues(local, sdata,
 					  IEEE80211_QUEUE_STOP_REASON_CSA);

@@ -3204,7 +3217,8 @@ static const u64 care_about_ies =
 	(1ULL << WLAN_EID_CHANNEL_SWITCH) |
 	(1ULL << WLAN_EID_PWR_CONSTRAINT) |
 	(1ULL << WLAN_EID_HT_CAPABILITY) |
-	(1ULL << WLAN_EID_HT_OPERATION);
+	(1ULL << WLAN_EID_HT_OPERATION) |
+	(1ULL << WLAN_EID_EXT_CHANSWITCH_ANN);
 
 static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
 				     struct ieee80211_mgmt *mgmt, size_t len,

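care_about_ies is a u64 used as a membership bitmap over element IDs 0..63: an information element feeds the cached beacon CRC only if its bit is set, so adding WLAN_EID_EXT_CHANSWITCH_ANN makes a newly appearing ECSA element change the CRC and force the beacon to be re-parsed. A sketch of the membership test (EID value 60 is from IEEE 802.11; the variable names are illustrative):

    #include <assert.h>
    #include <stdint.h>

    enum { WLAN_EID_EXT_CHANSWITCH_ANN = 60 };

    int main(void)
    {
        uint64_t care_about_ies = 0;

        care_about_ies |= 1ULL << WLAN_EID_EXT_CHANSWITCH_ANN;

        /* per-IE filter applied while hashing the beacon contents: */
        uint8_t eid = WLAN_EID_EXT_CHANSWITCH_ANN;
        assert(care_about_ies & (1ULL << eid));
        return 0;
    }

Note that this scheme only covers element IDs below 64; higher IDs cannot be tracked by a single u64 bitmap.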
@@ -2214,6 +2214,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
 	hdr = (struct ieee80211_hdr *) skb->data;
 	mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
 
+	if (ieee80211_drop_unencrypted(rx, hdr->frame_control))
+		return RX_DROP_MONITOR;
+
 	/* frame is in RMC, don't forward */
 	if (ieee80211_is_data(hdr->frame_control) &&
 	    is_multicast_ether_addr(hdr->addr1) &&

@@ -3178,7 +3178,7 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
 		wdev_iter = &sdata_iter->wdev;
 
 		if (sdata_iter == sdata ||
-		    rcu_access_pointer(sdata_iter->vif.chanctx_conf) == NULL ||
+		    !ieee80211_sdata_running(sdata_iter) ||
 		    local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype))
 			continue;
 

@@ -88,7 +88,9 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
 			int *unpinned);
 static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
 
-static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id)
+static int rds_iw_get_device(struct sockaddr_in *src, struct sockaddr_in *dst,
+			     struct rds_iw_device **rds_iwdev,
+			     struct rdma_cm_id **cm_id)
 {
 	struct rds_iw_device *iwdev;
 	struct rds_iw_cm_id *i_cm_id;

@@ -112,15 +114,15 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd
 				src_addr->sin_port,
 				dst_addr->sin_addr.s_addr,
 				dst_addr->sin_port,
-				rs->rs_bound_addr,
-				rs->rs_bound_port,
-				rs->rs_conn_addr,
-				rs->rs_conn_port);
+				src->sin_addr.s_addr,
+				src->sin_port,
+				dst->sin_addr.s_addr,
+				dst->sin_port);
 #ifdef WORKING_TUPLE_DETECTION
-		if (src_addr->sin_addr.s_addr == rs->rs_bound_addr &&
-		    src_addr->sin_port == rs->rs_bound_port &&
-		    dst_addr->sin_addr.s_addr == rs->rs_conn_addr &&
-		    dst_addr->sin_port == rs->rs_conn_port) {
+		if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr &&
+		    src_addr->sin_port == src->sin_port &&
+		    dst_addr->sin_addr.s_addr == dst->sin_addr.s_addr &&
+		    dst_addr->sin_port == dst->sin_port) {
 #else
 		/* FIXME - needs to compare the local and remote
 		 * ipaddr/port tuple, but the ipaddr is the only

@@ -128,7 +130,7 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd
 		 * zero'ed. It doesn't appear to be properly populated
 		 * during connection setup...
 		 */
-		if (src_addr->sin_addr.s_addr == rs->rs_bound_addr) {
+		if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr) {
 #endif
 			spin_unlock_irq(&iwdev->spinlock);
 			*rds_iwdev = iwdev;

@@ -180,19 +182,13 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i
 {
 	struct sockaddr_in *src_addr, *dst_addr;
 	struct rds_iw_device *rds_iwdev_old;
-	struct rds_sock rs;
 	struct rdma_cm_id *pcm_id;
 	int rc;
 
 	src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
 	dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
 
-	rs.rs_bound_addr = src_addr->sin_addr.s_addr;
-	rs.rs_bound_port = src_addr->sin_port;
-	rs.rs_conn_addr = dst_addr->sin_addr.s_addr;
-	rs.rs_conn_port = dst_addr->sin_port;
-
-	rc = rds_iw_get_device(&rs, &rds_iwdev_old, &pcm_id);
+	rc = rds_iw_get_device(src_addr, dst_addr, &rds_iwdev_old, &pcm_id);
 	if (rc)
 		rds_iw_remove_cm_id(rds_iwdev, cm_id);
 

@@ -598,9 +594,17 @@ void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
 	struct rds_iw_device *rds_iwdev;
 	struct rds_iw_mr *ibmr = NULL;
 	struct rdma_cm_id *cm_id;
+	struct sockaddr_in src = {
+		.sin_addr.s_addr = rs->rs_bound_addr,
+		.sin_port = rs->rs_bound_port,
+	};
+	struct sockaddr_in dst = {
+		.sin_addr.s_addr = rs->rs_conn_addr,
+		.sin_port = rs->rs_conn_port,
+	};
 	int ret;
 
-	ret = rds_iw_get_device(rs, &rds_iwdev, &cm_id);
+	ret = rds_iw_get_device(&src, &dst, &rds_iwdev, &cm_id);
 	if (ret || !cm_id) {
 		ret = -ENODEV;
 		goto out;

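The rds_iw_get_device() rework above removes the need for rds_iw_update_cm_id() to fabricate a whole struct rds_sock on the kernel stack just to carry four address fields; callers now pass two 16-byte sockaddr_in structures instead. A userspace sketch of the footprint argument (the stand-in struct and its size are purely illustrative, not the kernel's real layout):

    #include <stdio.h>
    #include <stdint.h>
    #include <netinet/in.h>

    /* Hypothetical stand-in: the real struct rds_sock embeds plenty of
     * unrelated state, which is what made the on-stack dummy risky. */
    struct fake_rds_sock {
        char     other_state[1024];
        uint32_t rs_bound_addr, rs_conn_addr;
        uint16_t rs_bound_port, rs_conn_port;
    };

    int main(void)
    {
        printf("dummy sock: %zu bytes vs. two sockaddr_in: %zu bytes\n",
               sizeof(struct fake_rds_sock), 2 * sizeof(struct sockaddr_in));
        return 0;
    }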
@@ -87,7 +87,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
 		if (!skb) {
 			/* nothing remains on the queue */
 			if (copied &&
-			    (msg->msg_flags & MSG_PEEK || timeo == 0))
+			    (flags & MSG_PEEK || timeo == 0))
 				goto out;
 
 			/* wait for a message to turn up */

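The rxrpc one-liner swaps in the right source of MSG_PEEK: in a recvmsg() implementation the caller's request flags arrive in the flags argument, while msg->msg_flags is an output field the kernel fills in before returning, so testing it for MSG_PEEK reads a value the caller never set. A sketch of the corrected predicate (function name and shape are illustrative, not rxrpc's):

    #include <assert.h>
    #include <stddef.h>
    #include <sys/socket.h>

    /* Mirrors the fixed condition: return early once something has been
     * copied if the caller is only peeking or asked not to block. */
    static int done_for_now(int flags, size_t copied, long timeo)
    {
        return copied && ((flags & MSG_PEEK) || timeo == 0);
    }

    int main(void)
    {
        assert(done_for_now(MSG_PEEK, 128, 100) == 1); /* peeker: stop now */
        assert(done_for_now(0, 128, 0) == 1);          /* non-blocking */
        assert(done_for_now(0, 128, 100) == 0);        /* keep waiting */
        return 0;
    }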
@@ -25,21 +25,41 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *a,
 		   struct tcf_result *res)
 {
 	struct tcf_bpf *b = a->priv;
-	int action;
-	int filter_res;
+	int action, filter_res;
 
 	spin_lock(&b->tcf_lock);
+
 	b->tcf_tm.lastuse = jiffies;
 	bstats_update(&b->tcf_bstats, skb);
-	action = b->tcf_action;
 
 	filter_res = BPF_PROG_RUN(b->filter, skb);
-	if (filter_res == 0) {
-		/* Return code 0 from the BPF program
-		 * is being interpreted as a drop here.
-		 */
-		action = TC_ACT_SHOT;
-		b->tcf_qstats.drops++;
-	}
+
+	/* A BPF program may overwrite the default action opcode.
+	 * Similarly as in cls_bpf, if filter_res == -1 we use the
+	 * default action specified from tc.
+	 *
+	 * In case a different well-known TC_ACT opcode has been
+	 * returned, it will overwrite the default one.
+	 *
+	 * For everything else that is unknown, TC_ACT_UNSPEC is
+	 * returned.
+	 */
+	switch (filter_res) {
+	case TC_ACT_PIPE:
+	case TC_ACT_RECLASSIFY:
+	case TC_ACT_OK:
+		action = filter_res;
+		break;
+	case TC_ACT_SHOT:
+		action = filter_res;
+		b->tcf_qstats.drops++;
+		break;
+	case TC_ACT_UNSPEC:
+		action = b->tcf_action;
+		break;
+	default:
+		action = TC_ACT_UNSPEC;
+		break;
+	}
 
 	spin_unlock(&b->tcf_lock);

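The act_bpf rewrite turns the old binary drop-or-default interpretation into a whitelist of TC_ACT opcodes that a BPF program may return directly. The same mapping, extracted as a pure function for illustration (TC_ACT_* values copied from include/uapi/linux/pkt_cls.h; the per-verdict drop-counter update is omitted here):

    #include <assert.h>

    #define TC_ACT_UNSPEC      (-1)  /* fall back to the action from tc */
    #define TC_ACT_OK            0
    #define TC_ACT_RECLASSIFY    1
    #define TC_ACT_SHOT          2
    #define TC_ACT_PIPE          3

    static int bpf_verdict_to_action(int filter_res, int default_action)
    {
        switch (filter_res) {
        case TC_ACT_PIPE:
        case TC_ACT_RECLASSIFY:
        case TC_ACT_OK:
        case TC_ACT_SHOT:  /* the kernel also bumps tcf_qstats.drops here */
            return filter_res;
        case TC_ACT_UNSPEC:
            return default_action;
        default:           /* anything unknown degrades to UNSPEC */
            return TC_ACT_UNSPEC;
        }
    }

    int main(void)
    {
        assert(bpf_verdict_to_action(TC_ACT_SHOT, TC_ACT_OK) == TC_ACT_SHOT);
        assert(bpf_verdict_to_action(TC_ACT_UNSPEC, TC_ACT_OK) == TC_ACT_OK);
        assert(bpf_verdict_to_action(12345, TC_ACT_OK) == TC_ACT_UNSPEC);
        return 0;
    }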
@@ -78,8 +78,11 @@ struct tc_u_hnode {
 	struct tc_u_common	*tp_c;
 	int			refcnt;
 	unsigned int		divisor;
-	struct tc_u_knode __rcu	*ht[1];
 	struct rcu_head		rcu;
+	/* The 'ht' field MUST be the last field in structure to allow for
+	 * more entries allocated at end of structure.
+	 */
+	struct tc_u_knode __rcu	*ht[1];
 };
 
 struct tc_u_common {

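Moving ht[] back to the end matters because tc_u_hnode is allocated as a single block sized for divisor + 1 hash slots; with rcu accidentally placed after the array, the extra slots and the rcu_head overlapped. A userspace sketch of the allocation pattern the new comment describes (simplified struct, illustrative names):

    #include <stdlib.h>

    struct hnode {
        unsigned int divisor;
        void *ht[1];      /* MUST be last: slots 1..divisor follow */
    };

    static struct hnode *hnode_alloc(unsigned int divisor)
    {
        /* one slot comes from ht[1] itself, 'divisor' more are tacked on,
         * matching the shape of the kzalloc() size computation in cls_u32 */
        return calloc(1, sizeof(struct hnode) + divisor * sizeof(void *));
    }

    int main(void)
    {
        struct hnode *h = hnode_alloc(255);   /* 256 slots in total */
        if (!h)
            return 1;
        h->ht[255] = &h->divisor;             /* last slot, still in bounds */
        free(h);
        return 0;
    }

Any member declared after such a trailing array sits inside the region the extra slots are written to, which is exactly the corruption the struct comment now warns against.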
@@ -4400,6 +4400,16 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
 	if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params))
 		return -EINVAL;
 
+	/* HT/VHT requires QoS, but if we don't have that just ignore HT/VHT
+	 * as userspace might just pass through the capabilities from the IEs
+	 * directly, rather than enforcing this restriction and returning an
+	 * error in this case.
+	 */
+	if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_WME))) {
+		params.ht_capa = NULL;
+		params.vht_capa = NULL;
+	}
+
 	/* When you run into this, adjust the code below for the new flag */
 	BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7);
 

@@ -2269,11 +2269,9 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
 		 * have the xfrm_state's. We need to wait for KM to
 		 * negotiate new SA's or bail out with error.*/
 		if (net->xfrm.sysctl_larval_drop) {
-			dst_release(dst);
-			xfrm_pols_put(pols, drop_pols);
 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
-
-			return ERR_PTR(-EREMOTE);
+			err = -EREMOTE;
+			goto error;
 		}
 
 		err = -EAGAIN;

@@ -2324,7 +2322,8 @@ nopol:
 error:
 	dst_release(dst);
 dropdst:
-	dst_release(dst_orig);
+	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
+		dst_release(dst_orig);
 	xfrm_pols_put(pols, drop_pols);
 	return ERR_PTR(err);
 }

@@ -2338,7 +2337,8 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
 					    struct sock *sk, int flags)
 {
 	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
-					    flags | XFRM_LOOKUP_QUEUE);
+					    flags | XFRM_LOOKUP_QUEUE |
+					    XFRM_LOOKUP_KEEP_DST_REF);
 
 	if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
 		return make_blackhole(net, dst_orig->ops->family, dst_orig);

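Taken together, the three xfrm hunks close a route leak by routing the larval-drop case through the common error exit and by making the release of dst_orig conditional: ordinary callers still have their reference consumed on error, while xfrm_lookup_route() passes XFRM_LOOKUP_KEEP_DST_REF because it must still hand dst_orig to make_blackhole() afterwards. A sketch of that ownership rule (names and refcounting are illustrative, not the kernel's):

    #include <assert.h>

    #define LOOKUP_KEEP_REF 0x1

    struct ref { int cnt; };

    static void ref_put(struct ref *r) { r->cnt--; }

    /* On failure, consume the caller's reference unless asked not to. */
    static int lookup(struct ref *orig, int flags)
    {
        if (!(flags & LOOKUP_KEEP_REF))
            ref_put(orig);
        return -1;  /* lookup failed */
    }

    int main(void)
    {
        struct ref orig = { .cnt = 1 };

        if (lookup(&orig, LOOKUP_KEEP_REF) < 0) {
            assert(orig.cnt == 1);  /* still usable for the fallback path */
            ref_put(&orig);         /* fallback releases it when done */
        }
        assert(orig.cnt == 0);      /* no leak, no double put */
        return 0;
    }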