Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Made TCP congestion control documentation match current reality,
    from Anmol Sarma.
 2) Various build warning and failure fixes from Arnd Bergmann.
 3) Fix SKB list leak in ipv6_gso_segment().
 4) Use after free in ravb driver, from Eugeniu Rosca.
 5) Don't use udp_poll() in ping protocol driver, from Eric Dumazet.
 6) Don't crash in PCI error recovery of cxgb4 driver, from Guilherme
    Piccoli.
 7) _SRC_NAT_DONE_BIT needs to be cleared using atomics, from Liping
    Zhang.
 8) Use after free in vxlan deletion, from Mark Bloch.
 9) Fix ordering of NAPI poll enabled in ethoc driver, from Max
    Filippov.
10) Fix stmmac hangs with TSO, from Niklas Cassel.
11) Fix crash in CALIPSO ipv6, from Richard Haines.
12) Clear nh_flags properly on mpls link up. From Roopa Prabhu.
13) Fix regression in sk_err socket error queue handling, noticed by
    ping applications. From Soheil Hassas Yeganeh.
14) Update mlx4/mlx5 MAINTAINERS information.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (78 commits)
  net: stmmac: fix a broken u32 less than zero check
  net: stmmac: fix completely hung TX when using TSO
  net: ethoc: enable NAPI before poll may be scheduled
  net: bridge: fix a null pointer dereference in br_afspec
  ravb: Fix use-after-free on `ifconfig eth0 down`
  net/ipv6: Fix CALIPSO causing GPF with datagram support
  net: stmmac: ensure jumbo_frm error return is correctly checked for -ve value
  Revert "sit: reload iphdr in ipip6_rcv"
  i40e/i40evf: proper update of the page_offset field
  i40e: Fix state flags for bit set and clean operations of PF
  iwlwifi: fix host command memory leaks
  iwlwifi: fix min API version for 7265D, 3168, 8000 and 8265
  iwlwifi: mvm: clear new beacon command template struct
  iwlwifi: mvm: don't fail when removing a key from an inexisting sta
  iwlwifi: pcie: only use d0i3 in suspend/resume if system_pm is set to d0i3
  iwlwifi: mvm: fix firmware debug restart recording
  iwlwifi: tt: move ucode_loaded check under mutex
  iwlwifi: mvm: support ibss in dqa mode
  iwlwifi: mvm: Fix command queue number on d0i3 flow
  iwlwifi: mvm: rs: start using LQ command color
  ...
commit b29794ec95
@@ -26,6 +26,10 @@ Optional properties:
- interrupt-controller : Indicates the switch is itself an interrupt
                         controller. This is used for the PHY interrupts.
#interrupt-cells = <2> : Controller uses two cells, number and flag
- eeprom-length        : Set to the length of an EEPROM connected to the
                         switch. Must be set if the switch can not detect
                         the presence and/or size of a connected EEPROM,
                         otherwise optional.
- mdio                 : Container of PHY and devices on the switches MDIO
                         bus.
- mdio?                : Container of PHYs and devices on the external MDIO

@@ -0,0 +1,194 @@
The QorIQ DPAA Ethernet Driver
==============================

Authors:
Madalin Bucur <madalin.bucur@nxp.com>
Camelia Groza <camelia.groza@nxp.com>

Contents
========

    - DPAA Ethernet Overview
    - DPAA Ethernet Supported SoCs
    - Configuring DPAA Ethernet in your kernel
    - DPAA Ethernet Frame Processing
    - DPAA Ethernet Features
    - Debugging

DPAA Ethernet Overview
======================

DPAA stands for Data Path Acceleration Architecture and it is a
set of networking acceleration IPs that are available on several
generations of SoCs, both on PowerPC and ARM64.

The Freescale DPAA architecture consists of a series of hardware blocks
that support Ethernet connectivity. The Ethernet driver depends upon the
following drivers in the Linux kernel:

 - Peripheral Access Memory Unit (PAMU) (* needed only for PPC platforms)
    drivers/iommu/fsl_*
 - Frame Manager (FMan)
    drivers/net/ethernet/freescale/fman
 - Queue Manager (QMan), Buffer Manager (BMan)
    drivers/soc/fsl/qbman

A simplified view of the dpaa_eth interfaces mapped to FMan MACs:

  dpaa_eth       /eth0\     ...       /ethN\
  driver        |      |             |      |
  -------------   ----   -----------   ----   -------------
       -Ports  / Tx  Rx \    ...    / Tx  Rx \
  FMan        |          |         |          |
       -MACs  |   MAC0   |         |   MACN   |
             /   dtsec0   \  ...  /   dtsecN   \  (or tgec)
            /              \     /              \ (or memac)
  ---------  --------------  ---  --------------  ---------
      FMan, FMan Port, FMan SP, FMan MURAM drivers
  ---------------------------------------------------------
      FMan HW blocks: MURAM, MACs, Ports, SP
  ---------------------------------------------------------

The dpaa_eth relation to the QMan, BMan and FMan:

              ________________________________
  dpaa_eth   /            eth0                \
  driver    /                                  \
  ---------   -^-    -^-    -^-    ---   ---------
  QMan driver / \    / \    / \   \ /    | BMan    |
             |Rx |  |Rx |  |Tx |  |Tx |  | driver  |
  ---------  |Dfl|  |Err|  |Cnf|  |FQs|  |         |
  QMan HW    |FQ |  |FQ |  |FQs|  |   |  |         |
            /   \  /   \  /   \   \ /   |         |
  ---------   ---    ---    ---   -v-   ---------
            |        FMan QMI          |
            | FMan HW       FMan BMI   | BMan HW
  -----------------------   --------

where the acronyms used above (and in the code) are:
DPAA = Data Path Acceleration Architecture
FMan = DPAA Frame Manager
QMan = DPAA Queue Manager
BMan = DPAA Buffers Manager
QMI = QMan interface in FMan
BMI = BMan interface in FMan
FMan SP = FMan Storage Profiles
MURAM = Multi-user RAM in FMan
FQ = QMan Frame Queue
Rx Dfl FQ = default reception FQ
Rx Err FQ = Rx error frames FQ
Tx Cnf FQ = Tx confirmation FQs
Tx FQs = transmission frame queues
dtsec = datapath three speed Ethernet controller (10/100/1000 Mbps)
tgec = ten gigabit Ethernet controller (10 Gbps)
memac = multirate Ethernet MAC (10/100/1000/10000)

DPAA Ethernet Supported SoCs
============================

The DPAA drivers enable the Ethernet controllers present on the following SoCs:

# PPC
P1023
P2041
P3041
P4080
P5020
P5040
T1023
T1024
T1040
T1042
T2080
T4240
B4860

# ARM
LS1043A
LS1046A

Configuring DPAA Ethernet in your kernel
========================================

To enable the DPAA Ethernet driver, the following Kconfig options are required:

# common for arch/arm64 and arch/powerpc platforms
CONFIG_FSL_DPAA=y
CONFIG_FSL_FMAN=y
CONFIG_FSL_DPAA_ETH=y
CONFIG_FSL_XGMAC_MDIO=y

# for arch/powerpc only
CONFIG_FSL_PAMU=y

# common options needed for the PHYs used on the RDBs
CONFIG_VITESSE_PHY=y
CONFIG_REALTEK_PHY=y
CONFIG_AQUANTIA_PHY=y

DPAA Ethernet Frame Processing
==============================

On Rx, buffers for the incoming frames are retrieved from one of the three
existing buffer pools. The driver initializes and seeds these, each with
buffers of different sizes: 1KB, 2KB and 4KB.

On Tx, all transmitted frames are returned to the driver through Tx
confirmation frame queues. The driver is then responsible for freeing the
buffers. In order to do this properly, a backpointer is added to the buffer
before transmission that points to the skb. When the buffer returns to the
driver on a confirmation FQ, the skb can be correctly consumed.
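
As a minimal sketch of the backpointer idea described above -- this is
illustrative only, not the dpaa_eth driver's actual code; the helper names
are hypothetical and alignment/headroom handling is elided:

#include <linux/skbuff.h>

/* Before transmission: stash the skb pointer at a known buffer offset. */
static void example_tx_store_backpointer(void *buf_start, struct sk_buff *skb)
{
    *(struct sk_buff **)buf_start = skb;    /* backpointer in headroom */
}

/* On Tx confirmation: recover the skb from the returned buffer, free it. */
static void example_tx_confirm(void *buf_start)
{
    struct sk_buff *skb = *(struct sk_buff **)buf_start;

    dev_kfree_skb(skb);                     /* driver owns the skb again */
}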

DPAA Ethernet Features
======================

Currently the DPAA Ethernet driver enables the basic features required for
a Linux Ethernet driver. The support for advanced features will be added
gradually.

The driver has Rx and Tx checksum offloading for UDP and TCP. Currently the Rx
checksum offload feature is enabled by default and cannot be controlled through
ethtool.

The driver has support for multiple prioritized Tx traffic classes. Priorities
range from 0 (lowest) to 3 (highest). These are mapped to HW workqueues with
strict priority levels. Each traffic class contains NR_CPU TX queues. By
default, only one traffic class is enabled and the lowest priority Tx queues
are used. Higher priority traffic classes can be enabled with the mqprio
qdisc. For example, all four traffic classes are enabled on an interface with
the following command. Furthermore, skb priority levels are mapped to traffic
classes as follows:

    * priorities 0 to 3 - traffic class 0 (low priority)
    * priorities 4 to 7 - traffic class 1 (medium-low priority)
    * priorities 8 to 11 - traffic class 2 (medium-high priority)
    * priorities 12 to 15 - traffic class 3 (high priority)

tc qdisc add dev <int> root handle 1: \
     mqprio num_tc 4 map 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 hw 1

Debugging
=========

The following statistics are exported for each interface through ethtool:

    - interrupt count per CPU
    - Rx packets count per CPU
    - Tx packets count per CPU
    - Tx confirmed packets count per CPU
    - Tx S/G frames count per CPU
    - Tx error count per CPU
    - Rx error count per CPU
    - Rx error count per type
    - congestion related statistics:
        - congestion status
        - time spent in congestion
        - number of times the device entered congestion
        - dropped packets count per cause

The driver also exports the following information in sysfs:

    - the FQ IDs for each FQ type
    /sys/devices/platform/dpaa-ethernet.0/net/<int>/fqids

    - the IDs of the buffer pools in use
    /sys/devices/platform/dpaa-ethernet.0/net/<int>/bpids
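
As a usage sketch, these exported files can simply be read as text. The
snippet below assumes an interface named eth0 on platform device
dpaa-ethernet.0; both names are illustrative placeholders for <int> above.

#include <stdio.h>

int main(void)
{
    char line[256];
    FILE *f = fopen("/sys/devices/platform/dpaa-ethernet.0/net/eth0/fqids", "r");

    if (!f) {
        perror("fqids");
        return 1;
    }
    while (fgets(line, sizeof(line), f))
        fputs(line, stdout);    /* print the exported FQ IDs */
    fclose(f);
    return 0;
}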

@@ -1,7 +1,7 @@
TCP protocol
============

Last updated: 9 February 2008
Last updated: 3 June 2017

Contents
========

@@ -29,18 +29,19 @@ As of 2.6.13, Linux supports pluggable congestion control algorithms.
A congestion control mechanism can be registered through functions in
tcp_cong.c. The functions used by the congestion control mechanism are
registered via passing a tcp_congestion_ops struct to
tcp_register_congestion_control. As a minimum name, ssthresh,
cong_avoid must be valid.
tcp_register_congestion_control. As a minimum, the congestion control
mechanism must provide a valid name and must implement either ssthresh,
cong_avoid and undo_cwnd hooks or the "omnipotent" cong_control hook.
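
To make the registration requirement concrete, here is a minimal sketch of
a module that satisfies it, assuming the hooks named above; reno's exported
helpers stand in for real algorithm logic, and "example" is a made-up name.

#include <linux/module.h>
#include <net/tcp.h>

static struct tcp_congestion_ops tcp_example __read_mostly = {
    .name       = "example",
    .owner      = THIS_MODULE,
    .ssthresh   = tcp_reno_ssthresh,    /* halve cwnd on loss */
    .cong_avoid = tcp_reno_cong_avoid,  /* standard AIMD growth */
    .undo_cwnd  = tcp_reno_undo_cwnd,   /* revert spurious reductions */
};

static int __init tcp_example_register(void)
{
    return tcp_register_congestion_control(&tcp_example);
}

static void __exit tcp_example_unregister(void)
{
    tcp_unregister_congestion_control(&tcp_example);
}

module_init(tcp_example_register);
module_exit(tcp_example_unregister);
MODULE_LICENSE("GPL");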

Private data for a congestion control mechanism is stored in tp->ca_priv.
tcp_ca(tp) returns a pointer to this space. This is preallocated space - it
is important to check the size of your private data will fit this space, or
alternatively space could be allocated elsewhere and a pointer to it could
alternatively, space could be allocated elsewhere and a pointer to it could
be stored here.

There are three kinds of congestion control algorithms currently: The
simplest ones are derived from TCP reno (highspeed, scalable) and just
provide an alternative the congestion window calculation. More complex
provide an alternative congestion window calculation. More complex
ones like BIC try to look at other events to provide better
heuristics. There are also round trip time based algorithms like
Vegas and Westwood+.

@@ -49,21 +50,15 @@ Good TCP congestion control is a complex problem because the algorithm
needs to maintain fairness and performance. Please review current
research and RFC's before developing new modules.

The method that is used to determine which congestion control mechanism is
determined by the setting of the sysctl net.ipv4.tcp_congestion_control.
The default congestion control will be the last one registered (LIFO);
so if you built everything as modules, the default will be reno. If you
build with the defaults from Kconfig, then CUBIC will be builtin (not a
module) and it will end up the default.
The default congestion control mechanism is chosen based on the
DEFAULT_TCP_CONG Kconfig parameter. If you really want a particular default
value then you can set it using sysctl net.ipv4.tcp_congestion_control. The
module will be autoloaded if needed and you will get the expected protocol. If
you ask for an unknown congestion method, then the sysctl attempt will fail.

If you really want a particular default value then you will need
to set it with the sysctl. If you use a sysctl, the module will be autoloaded
if needed and you will get the expected protocol. If you ask for an
unknown congestion method, then the sysctl attempt will fail.

If you remove a tcp congestion control module, then you will get the next
If you remove a TCP congestion control module, then you will get the next
available one. Since reno cannot be built as a module, and cannot be
deleted, it will always be available.
removed, it will always be available.

How the new TCP output machine [nyi] works.
===========================================

@@ -8508,7 +8508,7 @@ S: Odd Fixes
F:  drivers/media/radio/radio-miropcm20*

MELLANOX MLX4 core VPI driver
M:  Yishai Hadas <yishaih@mellanox.com>
M:  Tariq Toukan <tariqt@mellanox.com>
L:  netdev@vger.kernel.org
L:  linux-rdma@vger.kernel.org
W:  http://www.mellanox.com

@@ -8516,7 +8516,6 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/
S:  Supported
F:  drivers/net/ethernet/mellanox/mlx4/
F:  include/linux/mlx4/
F:  include/uapi/rdma/mlx4-abi.h

MELLANOX MLX4 IB driver
M:  Yishai Hadas <yishaih@mellanox.com>

@@ -8526,6 +8525,7 @@ Q: http://patchwork.kernel.org/project/linux-rdma/list/
S:  Supported
F:  drivers/infiniband/hw/mlx4/
F:  include/linux/mlx4/
F:  include/uapi/rdma/mlx4-abi.h

MELLANOX MLX5 core VPI driver
M:  Saeed Mahameed <saeedm@mellanox.com>

@@ -8538,7 +8538,6 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/
S:  Supported
F:  drivers/net/ethernet/mellanox/mlx5/core/
F:  include/linux/mlx5/
F:  include/uapi/rdma/mlx5-abi.h

MELLANOX MLX5 IB driver
M:  Matan Barak <matanb@mellanox.com>

@@ -8549,6 +8548,7 @@ Q: http://patchwork.kernel.org/project/linux-rdma/list/
S:  Supported
F:  drivers/infiniband/hw/mlx5/
F:  include/linux/mlx5/
F:  include/uapi/rdma/mlx5-abi.h

MELEXIS MLX90614 DRIVER
M:  Crt Mori <cmo@melexis.com>

@@ -120,10 +120,16 @@

    ethphy0: ethernet-phy@2 {
        reg = <2>;
        micrel,led-mode = <1>;
        clocks = <&clks IMX6UL_CLK_ENET_REF>;
        clock-names = "rmii-ref";
    };

    ethphy1: ethernet-phy@1 {
        reg = <1>;
        micrel,led-mode = <1>;
        clocks = <&clks IMX6UL_CLK_ENET2_REF>;
        clock-names = "rmii-ref";
    };
};
};

@@ -2364,7 +2364,7 @@ static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_s
               id);
        return NULL;
    } else {
        rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_KERNEL);
        rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_ATOMIC);
        if (!rs)
            return NULL;
        rs->state = CCPResetIdle;

@@ -75,7 +75,7 @@ send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
        if (sk->sk_state != MISDN_BOUND)
            continue;
        if (!cskb)
            cskb = skb_copy(skb, GFP_KERNEL);
            cskb = skb_copy(skb, GFP_ATOMIC);
        if (!cskb) {
            printk(KERN_WARNING "%s no skb\n", __func__);
            break;

@@ -114,13 +114,13 @@ static inline int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
    return -EOPNOTSUPP;
}

int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev,
               int src_port, u16 data)
static inline int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip,
                     int src_dev, int src_port, u16 data)
{
    return -EOPNOTSUPP;
}

int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
static inline int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
{
    return -EOPNOTSUPP;
}

@@ -324,7 +324,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
                  struct xgbe_ring *ring,
                  struct xgbe_ring_data *rdata)
{
    int order, ret;
    int ret;

    if (!ring->rx_hdr_pa.pages) {
        ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);

@@ -333,9 +333,8 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
    }

    if (!ring->rx_buf_pa.pages) {
        order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
        ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
                       order);
                       PAGE_ALLOC_COSTLY_ORDER);
        if (ret)
            return ret;
    }

@@ -2026,9 +2026,12 @@ static int bcm_sysport_probe(struct platform_device *pdev)
    priv->num_rx_desc_words = params->num_rx_desc_words;

    priv->irq0 = platform_get_irq(pdev, 0);
    if (!priv->is_lite)
    if (!priv->is_lite) {
        priv->irq1 = platform_get_irq(pdev, 1);
    priv->wol_irq = platform_get_irq(pdev, 2);
        priv->wol_irq = platform_get_irq(pdev, 2);
    } else {
        priv->wol_irq = platform_get_irq(pdev, 1);
    }
    if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
        dev_err(&pdev->dev, "invalid interrupts\n");
        ret = -EINVAL;

@@ -1926,7 +1926,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
    }

    /* select a non-FCoE queue */
    return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
    return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
}

void bnx2x_set_num_queues(struct bnx2x *bp)

@@ -2196,10 +2196,14 @@ static int cxgb_up(struct adapter *adap)
        if (err)
            goto irq_err;
    }

    mutex_lock(&uld_mutex);
    enable_rx(adap);
    t4_sge_start(adap);
    t4_intr_enable(adap);
    adap->flags |= FULL_INIT_DONE;
    mutex_unlock(&uld_mutex);

    notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
    update_clip(adap);

@@ -2771,6 +2775,9 @@ void t4_fatal_err(struct adapter *adap)
{
    int port;

    if (pci_channel_offline(adap->pdev))
        return;

    /* Disable the SGE since ULDs are going to free resources that
     * could be exposed to the adapter. RDMA MWs for example...
     */

@@ -3882,9 +3889,10 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
    spin_lock(&adap->stats_lock);
    for_each_port(adap, i) {
        struct net_device *dev = adap->port[i];

        netif_device_detach(dev);
        netif_carrier_off(dev);
        if (dev) {
            netif_device_detach(dev);
            netif_carrier_off(dev);
        }
    }
    spin_unlock(&adap->stats_lock);
    disable_interrupts(adap);

@@ -3963,12 +3971,13 @@ static void eeh_resume(struct pci_dev *pdev)
    rtnl_lock();
    for_each_port(adap, i) {
        struct net_device *dev = adap->port[i];

        if (netif_running(dev)) {
            link_start(dev);
            cxgb_set_rxmode(dev);
        if (dev) {
            if (netif_running(dev)) {
                link_start(dev);
                cxgb_set_rxmode(dev);
            }
            netif_device_attach(dev);
        }
        netif_device_attach(dev);
    }
    rtnl_unlock();
}

@@ -4557,8 +4557,13 @@ void t4_intr_enable(struct adapter *adapter)
 */
void t4_intr_disable(struct adapter *adapter)
{
    u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
    u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
    u32 whoami, pf;

    if (pci_channel_offline(adapter->pdev))
        return;

    whoami = t4_read_reg(adapter, PL_WHOAMI_A);
    pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
            SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);

    t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);

@@ -37,7 +37,7 @@

#define T4FW_VERSION_MAJOR 0x01
#define T4FW_VERSION_MINOR 0x10
#define T4FW_VERSION_MICRO 0x2B
#define T4FW_VERSION_MICRO 0x2D
#define T4FW_VERSION_BUILD 0x00

#define T4FW_MIN_VERSION_MAJOR 0x01

@@ -46,7 +46,7 @@

#define T5FW_VERSION_MAJOR 0x01
#define T5FW_VERSION_MINOR 0x10
#define T5FW_VERSION_MICRO 0x2B
#define T5FW_VERSION_MICRO 0x2D
#define T5FW_VERSION_BUILD 0x00

#define T5FW_MIN_VERSION_MAJOR 0x00

@@ -55,7 +55,7 @@

#define T6FW_VERSION_MAJOR 0x01
#define T6FW_VERSION_MINOR 0x10
#define T6FW_VERSION_MICRO 0x2B
#define T6FW_VERSION_MICRO 0x2D
#define T6FW_VERSION_BUILD 0x00

#define T6FW_MIN_VERSION_MAJOR 0x00

@@ -739,6 +739,8 @@ static int ethoc_open(struct net_device *dev)
    if (ret)
        return ret;

    napi_enable(&priv->napi);

    ethoc_init_ring(priv, dev->mem_start);
    ethoc_reset(priv);

@@ -754,7 +756,6 @@ static int ethoc_open(struct net_device *dev)
    priv->old_duplex = -1;

    phy_start(dev->phydev);
    napi_enable(&priv->napi);

    if (netif_msg_ifup(priv)) {
        dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",

@@ -381,7 +381,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
{
    const struct of_device_id *id =
        of_match_device(fsl_pq_mdio_match, &pdev->dev);
    const struct fsl_pq_mdio_data *data = id->data;
    const struct fsl_pq_mdio_data *data;
    struct device_node *np = pdev->dev.of_node;
    struct resource res;
    struct device_node *tbi;

@@ -389,6 +389,13 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
    struct mii_bus *new_bus;
    int err;

    if (!id) {
        dev_err(&pdev->dev, "Failed to match device\n");
        return -ENODEV;
    }

    data = id->data;

    dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);

    new_bus = mdiobus_alloc_size(sizeof(*priv));

@@ -81,7 +81,7 @@
static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

@@ -295,7 +295,7 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
    if (!test_bit(__I40E_VSI_DOWN, pf->state) &&
    if (!test_bit(__I40E_DOWN, pf->state) &&
        !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
        queue_work(i40e_wq, &pf->service_task);
}

@@ -3611,7 +3611,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
         * this is not a performance path and napi_schedule()
         * can deal with rescheduling.
         */
        if (!test_bit(__I40E_VSI_DOWN, pf->state))
        if (!test_bit(__I40E_DOWN, pf->state))
            napi_schedule_irqoff(&q_vector->napi);
    }

@@ -3687,7 +3687,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
enable_intr:
    /* re-enable interrupt causes */
    wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
    if (!test_bit(__I40E_VSI_DOWN, pf->state)) {
    if (!test_bit(__I40E_DOWN, pf->state)) {
        i40e_service_event_schedule(pf);
        i40e_irq_dynamic_enable_icr0(pf, false);
    }

@@ -6203,7 +6203,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
{

    /* if interface is down do nothing */
    if (test_bit(__I40E_VSI_DOWN, pf->state))
    if (test_bit(__I40E_DOWN, pf->state))
        return;

    if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))

@@ -6344,7 +6344,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
    int i;

    /* if interface is down do nothing */
    if (test_bit(__I40E_VSI_DOWN, pf->state) ||
    if (test_bit(__I40E_DOWN, pf->state) ||
        test_bit(__I40E_CONFIG_BUSY, pf->state))
        return;

@@ -6399,9 +6399,9 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
        reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
        clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
    }
    if (test_bit(__I40E_VSI_DOWN_REQUESTED, pf->state)) {
        reset_flags |= BIT(__I40E_VSI_DOWN_REQUESTED);
        clear_bit(__I40E_VSI_DOWN_REQUESTED, pf->state);
    if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
        reset_flags |= BIT(__I40E_DOWN_REQUESTED);
        clear_bit(__I40E_DOWN_REQUESTED, pf->state);
    }

    /* If there's a recovery already waiting, it takes

@@ -6415,7 +6415,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)

    /* If we're already down or resetting, just bail */
    if (reset_flags &&
        !test_bit(__I40E_VSI_DOWN, pf->state) &&
        !test_bit(__I40E_DOWN, pf->state) &&
        !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
        rtnl_lock();
        i40e_do_reset(pf, reset_flags, true);

@@ -7002,7 +7002,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
    u32 val;
    int v;

    if (test_bit(__I40E_VSI_DOWN, pf->state))
    if (test_bit(__I40E_DOWN, pf->state))
        goto clear_recovery;
    dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

@@ -9767,7 +9767,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
        return -ENODEV;
    }
    if (vsi == pf->vsi[pf->lan_vsi] &&
        !test_bit(__I40E_VSI_DOWN, pf->state)) {
        !test_bit(__I40E_DOWN, pf->state)) {
        dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
        return -ENODEV;
    }

@@ -11003,7 +11003,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    }
    pf->next_vsi = 0;
    pf->pdev = pdev;
    set_bit(__I40E_VSI_DOWN, pf->state);
    set_bit(__I40E_DOWN, pf->state);

    hw = &pf->hw;
    hw->back = pf;

@@ -11293,7 +11293,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
     * before setting up the misc vector or we get a race and the vector
     * ends up disabled forever.
     */
    clear_bit(__I40E_VSI_DOWN, pf->state);
    clear_bit(__I40E_DOWN, pf->state);

    /* In case of MSIX we are going to setup the misc vector right here
     * to handle admin queue events etc. In case of legacy and MSI

@@ -11448,7 +11448,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

    /* Unwind what we've done if something failed in the setup */
err_vsis:
    set_bit(__I40E_VSI_DOWN, pf->state);
    set_bit(__I40E_DOWN, pf->state);
    i40e_clear_interrupt_scheme(pf);
    kfree(pf->vsi);
err_switch_setup:

@@ -11500,7 +11500,7 @@ static void i40e_remove(struct pci_dev *pdev)

    /* no more scheduling of any task */
    set_bit(__I40E_SUSPENDED, pf->state);
    set_bit(__I40E_VSI_DOWN, pf->state);
    set_bit(__I40E_DOWN, pf->state);
    if (pf->service_timer.data)
        del_timer_sync(&pf->service_timer);
    if (pf->service_task.func)

@@ -11740,7 +11740,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
    struct i40e_hw *hw = &pf->hw;

    set_bit(__I40E_SUSPENDED, pf->state);
    set_bit(__I40E_VSI_DOWN, pf->state);
    set_bit(__I40E_DOWN, pf->state);
    rtnl_lock();
    i40e_prep_for_reset(pf, true);
    rtnl_unlock();

@@ -11789,7 +11789,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
    int retval = 0;

    set_bit(__I40E_SUSPENDED, pf->state);
    set_bit(__I40E_VSI_DOWN, pf->state);
    set_bit(__I40E_DOWN, pf->state);

    if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
        i40e_enable_mc_magic_wake(pf);

@@ -11841,7 +11841,7 @@ static int i40e_resume(struct pci_dev *pdev)

    /* handling the reset will rebuild the device state */
    if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
        clear_bit(__I40E_VSI_DOWN, pf->state);
        clear_bit(__I40E_DOWN, pf->state);
        rtnl_lock();
        i40e_reset_and_rebuild(pf, false, true);
        rtnl_unlock();

@@ -1854,7 +1854,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
#if (PAGE_SIZE < 8192)
    unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
    unsigned int truesize = SKB_DATA_ALIGN(size);
    unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
                SKB_DATA_ALIGN(I40E_SKB_PAD + size);
#endif
    struct sk_buff *skb;

@@ -1190,7 +1190,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
#if (PAGE_SIZE < 8192)
    unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
    unsigned int truesize = SKB_DATA_ALIGN(size);
    unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
                SKB_DATA_ALIGN(I40E_SKB_PAD + size);
#endif
    struct sk_buff *skb;

@@ -1562,11 +1562,6 @@ static int mlx4_en_flow_replace(struct net_device *dev,
        qpn = priv->drop_qp.qpn;
    else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
        qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
        if (qpn < priv->rss_map.base_qpn ||
            qpn >= priv->rss_map.base_qpn + priv->rx_ring_num) {
            en_warn(priv, "rxnfc: QP (0x%x) doesn't exist\n", qpn);
            return -EINVAL;
        }
    } else {
        if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
            en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",

@@ -35,6 +35,7 @@
#include <linux/etherdevice.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/export.h>

#include "mlx4.h"

@@ -985,16 +986,21 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
    if (IS_ERR(mailbox))
        return PTR_ERR(mailbox);

    if (!mlx4_qp_lookup(dev, rule->qpn)) {
        mlx4_err_rule(dev, "QP doesn't exist\n", rule);
        ret = -EINVAL;
        goto out;
    }

    trans_rule_ctrl_to_hw(rule, mailbox->buf);

    size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);

    list_for_each_entry(cur, &rule->list, list) {
        ret = parse_trans_rule(dev, cur, mailbox->buf + size);
        if (ret < 0) {
            mlx4_free_cmd_mailbox(dev, mailbox);
            return ret;
        }
        if (ret < 0)
            goto out;

        size += ret;
    }

@@ -1021,6 +1027,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
        }
    }

out:
    mlx4_free_cmd_mailbox(dev, mailbox);

    return ret;

@@ -384,6 +384,19 @@ static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
    __mlx4_qp_free_icm(dev, qpn);
}

struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
{
    struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
    struct mlx4_qp *qp;

    spin_lock(&qp_table->lock);

    qp = __mlx4_qp_lookup(dev, qpn);

    spin_unlock(&qp_table->lock);
    return qp;
}

int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
{
    struct mlx4_priv *priv = mlx4_priv(dev);

@@ -471,6 +484,12 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
    }

    if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
        if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) {
            mlx4_warn(dev, "Granular QoS per VF is not enabled\n");
            err = -EOPNOTSUPP;
            goto out;
        }

        qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
        cmd->qp_context.qos_vport = params->qos_vport;
    }

@@ -5255,6 +5255,13 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
    mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}

static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
               struct mlx4_vf_immed_vlan_work *work)
{
    ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
    ctx->qp_context.qos_vport = work->qos_vport;
}

void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
    struct mlx4_vf_immed_vlan_work *work =

@@ -5369,11 +5376,10 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
                    qp->sched_queue & 0xC7;
                upd_context->qp_context.pri_path.sched_queue |=
                    ((work->qos & 0x7) << 3);
                upd_context->qp_mask |=
                    cpu_to_be64(1ULL <<
                            MLX4_UPD_QP_MASK_QOS_VPP);
                upd_context->qp_context.qos_vport =
                    work->qos_vport;

                if (dev->caps.flags2 &
                    MLX4_DEV_CAP_FLAG2_QOS_VPP)
                    update_qos_vpp(upd_context, work);
            }

            err = mlx4_cmd(dev, mailbox->dma,

@@ -621,10 +621,9 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
    cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
            priv->irq_info[i].mask);

#ifdef CONFIG_SMP
    if (irq_set_affinity_hint(irq, priv->irq_info[i].mask))
    if (IS_ENABLED(CONFIG_SMP) &&
        irq_set_affinity_hint(irq, priv->irq_info[i].mask))
        mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
#endif

    return 0;
}

@@ -1730,7 +1730,8 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
        qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
        break;
    default:
        DP_ERR(cdev, "Invalid protocol type = %d\n", type);
        DP_VERBOSE(cdev, QED_MSG_SP,
               "Invalid protocol type = %d\n", type);
        return;
    }
}

@@ -1824,22 +1824,44 @@ struct qlcnic_hardware_ops {
    u32 (*get_cap_size)(void *, int);
    void (*set_sys_info)(void *, int, u32);
    void (*store_cap_mask)(void *, u32);
    bool (*encap_rx_offload) (struct qlcnic_adapter *adapter);
    bool (*encap_tx_offload) (struct qlcnic_adapter *adapter);
};

extern struct qlcnic_nic_template qlcnic_vf_ops;

static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
static inline bool qlcnic_83xx_encap_tx_offload(struct qlcnic_adapter *adapter)
{
    return adapter->ahw->extra_capability[0] &
           QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD;
}

static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
static inline bool qlcnic_83xx_encap_rx_offload(struct qlcnic_adapter *adapter)
{
    return adapter->ahw->extra_capability[0] &
           QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD;
}

static inline bool qlcnic_82xx_encap_tx_offload(struct qlcnic_adapter *adapter)
{
    return false;
}

static inline bool qlcnic_82xx_encap_rx_offload(struct qlcnic_adapter *adapter)
{
    return false;
}

static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
{
    return adapter->ahw->hw_ops->encap_rx_offload(adapter);
}

static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
{
    return adapter->ahw->hw_ops->encap_tx_offload(adapter);
}

static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter)
{
    return adapter->nic_ops->start_firmware(adapter);

@@ -242,6 +242,8 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
    .get_cap_size           = qlcnic_83xx_get_cap_size,
    .set_sys_info           = qlcnic_83xx_set_sys_info,
    .store_cap_mask         = qlcnic_83xx_store_cap_mask,
    .encap_rx_offload       = qlcnic_83xx_encap_rx_offload,
    .encap_tx_offload       = qlcnic_83xx_encap_tx_offload,
};

static struct qlcnic_nic_template qlcnic_83xx_ops = {

@@ -341,7 +341,7 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
            }
            return -EIO;
        }
        usleep_range(1000, 1500);
        udelay(1200);
    }

    if (id_reg)

@@ -632,6 +632,8 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
    .get_cap_size           = qlcnic_82xx_get_cap_size,
    .set_sys_info           = qlcnic_82xx_set_sys_info,
    .store_cap_mask         = qlcnic_82xx_store_cap_mask,
    .encap_rx_offload       = qlcnic_82xx_encap_rx_offload,
    .encap_tx_offload       = qlcnic_82xx_encap_tx_offload,
};

static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)

@@ -77,6 +77,8 @@ static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
    .free_mac_list          = qlcnic_sriov_vf_free_mac_list,
    .enable_sds_intr        = qlcnic_83xx_enable_sds_intr,
    .disable_sds_intr       = qlcnic_83xx_disable_sds_intr,
    .encap_rx_offload       = qlcnic_83xx_encap_rx_offload,
    .encap_tx_offload       = qlcnic_83xx_encap_tx_offload,
};

static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {

@@ -931,7 +931,7 @@ int emac_mac_up(struct emac_adapter *adpt)
    emac_mac_config(adpt);
    emac_mac_rx_descs_refill(adpt, &adpt->rx_q);

    adpt->phydev->irq = PHY_IGNORE_INTERRUPT;
    adpt->phydev->irq = PHY_POLL;
    ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link,
                 PHY_INTERFACE_MODE_SGMII);
    if (ret) {

@@ -13,15 +13,11 @@
/* Qualcomm Technologies, Inc. EMAC PHY Controller driver.
 */

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/iopoll.h>
#include <linux/acpi.h>
#include "emac.h"
#include "emac-mac.h"

/* EMAC base register offsets */
#define EMAC_MDIO_CTRL        0x001414

@@ -52,62 +48,10 @@

#define MDIO_WAIT_TIMES       1000

#define EMAC_LINK_SPEED_DEFAULT (\
        EMAC_LINK_SPEED_10_HALF  |\
        EMAC_LINK_SPEED_10_FULL  |\
        EMAC_LINK_SPEED_100_HALF |\
        EMAC_LINK_SPEED_100_FULL |\
        EMAC_LINK_SPEED_1GB_FULL)

/**
 * emac_phy_mdio_autopoll_disable() - disable mdio autopoll
 * @adpt: the emac adapter
 *
 * The autopoll feature takes over the MDIO bus. In order for
 * the PHY driver to be able to talk to the PHY over the MDIO
 * bus, we need to temporarily disable the autopoll feature.
 */
static int emac_phy_mdio_autopoll_disable(struct emac_adapter *adpt)
{
    u32 val;

    /* disable autopoll */
    emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, MDIO_AP_EN, 0);

    /* wait for any mdio polling to complete */
    if (!readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, val,
                !(val & MDIO_BUSY), 100, MDIO_WAIT_TIMES * 100))
        return 0;

    /* failed to disable; ensure it is enabled before returning */
    emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);

    return -EBUSY;
}

/**
 * emac_phy_mdio_autopoll_disable() - disable mdio autopoll
 * @adpt: the emac adapter
 *
 * The EMAC has the ability to poll the external PHY on the MDIO
 * bus for link state changes. This eliminates the need for the
 * driver to poll the phy. If if the link state does change,
 * the EMAC issues an interrupt on behalf of the PHY.
 */
static void emac_phy_mdio_autopoll_enable(struct emac_adapter *adpt)
{
    emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);
}

static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
    struct emac_adapter *adpt = bus->priv;
    u32 reg;
    int ret;

    ret = emac_phy_mdio_autopoll_disable(adpt);
    if (ret)
        return ret;

    emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
              (addr << PHY_ADDR_SHFT));

@@ -122,24 +66,15 @@ static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
    if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
                   !(reg & (MDIO_START | MDIO_BUSY)),
                   100, MDIO_WAIT_TIMES * 100))
        ret = -EIO;
    else
        ret = (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
        return -EIO;

    emac_phy_mdio_autopoll_enable(adpt);

    return ret;
    return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
}

static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
    struct emac_adapter *adpt = bus->priv;
    u32 reg;
    int ret;

    ret = emac_phy_mdio_autopoll_disable(adpt);
    if (ret)
        return ret;

    emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
              (addr << PHY_ADDR_SHFT));

@@ -155,11 +90,9 @@ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
    if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
                   !(reg & (MDIO_START | MDIO_BUSY)), 100,
                   MDIO_WAIT_TIMES * 100))
        ret = -EIO;
        return -EIO;

    emac_phy_mdio_autopoll_enable(adpt);

    return ret;
    return 0;
}

/* Configure the MDIO bus and connect the external PHY */

@@ -50,19 +50,7 @@
#define DMAR_DLY_CNT_DEF   15
#define DMAW_DLY_CNT_DEF    4

#define IMR_NORMAL_MASK (\
        ISR_ERROR       |\
        ISR_GPHY_LINK   |\
        ISR_TX_PKT      |\
        GPHY_WAKEUP_INT)

#define IMR_EXTENDED_MASK (\
        SW_MAN_INT      |\
        ISR_OVER        |\
        ISR_ERROR       |\
        ISR_GPHY_LINK   |\
        ISR_TX_PKT      |\
        GPHY_WAKEUP_INT)
#define IMR_NORMAL_MASK (ISR_ERROR | ISR_OVER | ISR_TX_PKT)

#define ISR_TX_PKT      (\
    TX_PKT_INT      |\

@@ -70,10 +58,6 @@
    TX_PKT_INT2     |\
    TX_PKT_INT3)

#define ISR_GPHY_LINK        (\
    GPHY_LINK_UP_INT     |\
    GPHY_LINK_DOWN_INT)

#define ISR_OVER        (\
    RFD0_UR_INT     |\
    RFD1_UR_INT     |\

@@ -187,10 +171,6 @@ irqreturn_t emac_isr(int _irq, void *data)
    if (status & ISR_OVER)
        net_warn_ratelimited("warning: TX/RX overflow\n");

    /* link event */
    if (status & ISR_GPHY_LINK)
        phy_mac_interrupt(adpt->phydev, !!(status & GPHY_LINK_UP_INT));

exit:
    /* enable the interrupt */
    writel(irq->mask, adpt->base + EMAC_INT_MASK);

@@ -230,18 +230,6 @@ static void ravb_ring_free(struct net_device *ndev, int q)
    int ring_size;
    int i;

    /* Free RX skb ringbuffer */
    if (priv->rx_skb[q]) {
        for (i = 0; i < priv->num_rx_ring[q]; i++)
            dev_kfree_skb(priv->rx_skb[q][i]);
    }
    kfree(priv->rx_skb[q]);
    priv->rx_skb[q] = NULL;

    /* Free aligned TX buffers */
    kfree(priv->tx_align[q]);
    priv->tx_align[q] = NULL;

    if (priv->rx_ring[q]) {
        for (i = 0; i < priv->num_rx_ring[q]; i++) {
            struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];

@@ -270,6 +258,18 @@ static void ravb_ring_free(struct net_device *ndev, int q)
        priv->tx_ring[q] = NULL;
    }

    /* Free RX skb ringbuffer */
    if (priv->rx_skb[q]) {
        for (i = 0; i < priv->num_rx_ring[q]; i++)
            dev_kfree_skb(priv->rx_skb[q][i]);
    }
    kfree(priv->rx_skb[q]);
    priv->rx_skb[q] = NULL;

    /* Free aligned TX buffers */
    kfree(priv->tx_align[q]);
    priv->tx_align[q] = NULL;

    /* Free TX skb ringbuffer.
     * SKBs are freed by ravb_tx_free() call above.
     */

@@ -37,6 +37,7 @@
#define TSE_PCS_CONTROL_AN_EN_MASK          BIT(12)
#define TSE_PCS_CONTROL_REG                 0x00
#define TSE_PCS_CONTROL_RESTART_AN_MASK     BIT(9)
#define TSE_PCS_CTRL_AUTONEG_SGMII          0x1140
#define TSE_PCS_IF_MODE_REG                 0x28
#define TSE_PCS_LINK_TIMER_0_REG            0x24
#define TSE_PCS_LINK_TIMER_1_REG            0x26

@@ -65,6 +66,7 @@
#define TSE_PCS_SW_RESET_TIMEOUT            100
#define TSE_PCS_USE_SGMII_AN_MASK           BIT(1)
#define TSE_PCS_USE_SGMII_ENA               BIT(0)
#define TSE_PCS_IF_USE_SGMII                0x03

#define SGMII_ADAPTER_CTRL_REG              0x00
#define SGMII_ADAPTER_DISABLE               0x0001

@@ -101,7 +103,9 @@ int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs)
{
    int ret = 0;

    writew(TSE_PCS_USE_SGMII_ENA, base + TSE_PCS_IF_MODE_REG);
    writew(TSE_PCS_IF_USE_SGMII, base + TSE_PCS_IF_MODE_REG);

    writew(TSE_PCS_CTRL_AUTONEG_SGMII, base + TSE_PCS_CONTROL_REG);

    writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG);
    writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG);

@@ -1208,7 +1208,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
    u32 rx_count = priv->plat->rx_queues_to_use;
    unsigned int bfsize = 0;
    int ret = -ENOMEM;
    u32 queue;
    int queue;
    int i;

    if (priv->hw->mode->set_16kib_bfsize)

@@ -2724,7 +2724,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,

        priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
            0, 1,
            (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
            (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
            0, 0);

        tmp_len -= TSO_MAX_BUFF_SIZE;

@@ -2947,7 +2947,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
    int i, csum_insertion = 0, is_jumbo = 0;
    u32 queue = skb_get_queue_mapping(skb);
    int nfrags = skb_shinfo(skb)->nr_frags;
    unsigned int entry, first_entry;
    int entry;
    unsigned int first_entry;
    struct dma_desc *desc, *first;
    struct stmmac_tx_queue *tx_q;
    unsigned int enh_desc;

@@ -1133,7 +1133,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,

    /* make enough headroom for basic scenario */
    encap_len = GENEVE_BASE_HLEN + ETH_HLEN;
    if (ip_tunnel_info_af(info) == AF_INET) {
    if (!metadata && ip_tunnel_info_af(info) == AF_INET) {
        encap_len += sizeof(struct iphdr);
        dev->max_mtu -= sizeof(struct iphdr);
    } else {

@@ -576,6 +576,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
    case HDLCDRVCTL_CALIBRATE:
        if(!capable(CAP_SYS_RAWIO))
            return -EPERM;
        if (s->par.bitrate <= 0)
            return -EINVAL;
        if (bi.data.calibrate > INT_MAX / s->par.bitrate)
            return -EINVAL;
        s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;

@@ -1127,8 +1127,6 @@ static int marvell_read_status_page(struct phy_device *phydev, int page)
        if (adv < 0)
            return adv;

        lpa &= adv;

        if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
            phydev->duplex = DUPLEX_FULL;
        else

@@ -658,6 +658,18 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
    return 0;
}

static int mdio_uevent(struct device *dev, struct kobj_uevent_env *env)
{
    int rc;

    /* Some devices have extra OF data and an OF-style MODALIAS */
    rc = of_device_uevent_modalias(dev, env);
    if (rc != -ENODEV)
        return rc;

    return 0;
}

#ifdef CONFIG_PM
static int mdio_bus_suspend(struct device *dev)
{

@@ -708,6 +720,7 @@ static const struct dev_pm_ops mdio_bus_pm_ops = {
struct bus_type mdio_bus_type = {
    .name       = "mdio_bus",
    .match      = mdio_bus_match,
    .uevent     = mdio_uevent,
    .pm         = MDIO_BUS_PM_OPS,
};
EXPORT_SYMBOL(mdio_bus_type);

@@ -268,11 +268,31 @@ out:
    return ret;
}

/* Some config bits need to be set again on resume, handle them here. */
static int kszphy_config_reset(struct phy_device *phydev)
{
    struct kszphy_priv *priv = phydev->priv;
    int ret;

    if (priv->rmii_ref_clk_sel) {
        ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val);
        if (ret) {
            phydev_err(phydev,
                   "failed to set rmii reference clock\n");
            return ret;
        }
    }

    if (priv->led_mode >= 0)
        kszphy_setup_led(phydev, priv->type->led_mode_reg, priv->led_mode);

    return 0;
}

static int kszphy_config_init(struct phy_device *phydev)
{
    struct kszphy_priv *priv = phydev->priv;
    const struct kszphy_type *type;
    int ret;

    if (!priv)
        return 0;

@@ -285,19 +305,7 @@ static int kszphy_config_init(struct phy_device *phydev)
    if (type->has_nand_tree_disable)
        kszphy_nand_tree_disable(phydev);

    if (priv->rmii_ref_clk_sel) {
        ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val);
        if (ret) {
            phydev_err(phydev,
                   "failed to set rmii reference clock\n");
            return ret;
        }
    }

    if (priv->led_mode >= 0)
        kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode);

    return 0;
    return kszphy_config_reset(phydev);
}

static int ksz8041_config_init(struct phy_device *phydev)

@@ -700,8 +708,14 @@ static int kszphy_suspend(struct phy_device *phydev)

static int kszphy_resume(struct phy_device *phydev)
{
    int ret;

    genphy_resume(phydev);

    ret = kszphy_config_reset(phydev);
    if (ret)
        return ret;

    /* Enable PHY Interrupts */
    if (phy_interrupt_is_valid(phydev)) {
        phydev->interrupts = PHY_INTERRUPT_ENABLED;

@@ -241,7 +241,7 @@ static const struct phy_setting settings[] = {
 * phy_lookup_setting - lookup a PHY setting
 * @speed: speed to match
 * @duplex: duplex to match
 * @feature: allowed link modes
 * @features: allowed link modes
 * @exact: an exact match is required
 *
 * Search the settings array for a setting that matches the speed and

@@ -869,7 +869,7 @@ static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
    unsigned int len;

    len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
                rq->min_buf_len - hdr_len, PAGE_SIZE - hdr_len);
                rq->min_buf_len, PAGE_SIZE - hdr_len);
    return ALIGN(len, L1_CACHE_BYTES);
}

@@ -2144,7 +2144,8 @@ static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqu
    unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
    unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);

    return max(min_buf_len, hdr_len);
    return max(max(min_buf_len, hdr_len) - hdr_len,
           (unsigned int)GOOD_PACKET_LEN);
}

static int virtnet_find_vqs(struct virtnet_info *vi)

@@ -59,6 +59,8 @@ static const u8 all_zeros_mac[ETH_ALEN + 2];

static int vxlan_sock_add(struct vxlan_dev *vxlan);

static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);

/* per-network namespace private data for this module */
struct vxlan_net {
    struct list_head vxlan_list;

@@ -740,6 +742,22 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
    call_rcu(&f->rcu, vxlan_fdb_free);
}

static void vxlan_dst_free(struct rcu_head *head)
{
    struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);

    dst_cache_destroy(&rd->dst_cache);
    kfree(rd);
}

static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
                  struct vxlan_rdst *rd)
{
    list_del_rcu(&rd->list);
    vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
    call_rcu(&rd->rcu, vxlan_dst_free);
}

static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
               union vxlan_addr *ip, __be16 *port, __be32 *src_vni,
               __be32 *vni, u32 *ifindex)

@@ -864,9 +882,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
     * otherwise destroy the fdb entry
     */
    if (rd && !list_is_singular(&f->remotes)) {
        list_del_rcu(&rd->list);
        vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
        kfree_rcu(rd, rcu);
        vxlan_fdb_dst_destroy(vxlan, f, rd);
        goto out;
    }

@@ -1067,6 +1083,8 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan)
    rcu_assign_pointer(vxlan->vn4_sock, NULL);
    synchronize_net();

    vxlan_vs_del_dev(vxlan);

    if (__vxlan_sock_release_prep(sock4)) {
        udp_tunnel_sock_release(sock4->sock);
        kfree(sock4);

@@ -2342,6 +2360,15 @@ static void vxlan_cleanup(unsigned long arg)
    mod_timer(&vxlan->age_timer, next_timer);
}

static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
{
    struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);

    spin_lock(&vn->sock_lock);
    hlist_del_init_rcu(&vxlan->hlist);
    spin_unlock(&vn->sock_lock);
}

static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
{
    struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);

@@ -3286,15 +3313,9 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
    struct vxlan_dev *vxlan = netdev_priv(dev);
    struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);

    vxlan_flush(vxlan, true);

    spin_lock(&vn->sock_lock);
    if (!hlist_unhashed(&vxlan->hlist))
        hlist_del_rcu(&vxlan->hlist);
    spin_unlock(&vn->sock_lock);

    gro_cells_destroy(&vxlan->gro_cells);
    list_del(&vxlan->next);
    unregister_netdevice_queue(dev, head);

@@ -1271,6 +1271,8 @@ static int wcn36xx_remove(struct platform_device *pdev)
    qcom_smem_state_put(wcn->tx_enable_state);
    qcom_smem_state_put(wcn->tx_rings_empty_state);

    rpmsg_destroy_ept(wcn->smd_channel);

    iounmap(wcn->dxe_base);
    iounmap(wcn->ccu_base);

@@ -3422,7 +3422,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
        /* otherwise, set txglomalign */
        value = sdiodev->settings->bus.sdio.sd_sgentry_align;
        /* SDIO ADMA requires at least 32 bit alignment */
        value = max_t(u32, value, 4);
        value = max_t(u32, value, ALIGNMENT);
        err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value,
                       sizeof(u32));
    }

@@ -79,8 +79,8 @@
/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN   17
#define IWL7265_UCODE_API_MIN   17
#define IWL7265D_UCODE_API_MIN  17
#define IWL3168_UCODE_API_MIN   20
#define IWL7265D_UCODE_API_MIN  22
#define IWL3168_UCODE_API_MIN   22

/* NVM versions */
#define IWL7260_NVM_VERSION     0x0a1d

@@ -74,8 +74,8 @@
 #define IWL8265_UCODE_API_MAX 30
 
 /* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN 17
-#define IWL8265_UCODE_API_MIN 20
+#define IWL8000_UCODE_API_MIN 22
+#define IWL8265_UCODE_API_MIN 22
 
 /* NVM versions */
 #define IWL8000_NVM_VERSION 0x0a1d

@@ -370,6 +370,7 @@
 #define MON_DMARB_RD_DATA_ADDR (0xa03c5c)
 
 #define DBGC_IN_SAMPLE (0xa03c00)
+#define DBGC_OUT_CTRL (0xa03c0c)
 
 /* enable the ID buf for read */
 #define WFPM_PS_CTL_CLR 0xA0300C

@@ -307,6 +307,11 @@ enum {
 /* Bit 1-3: LQ command color. Used to match responses to LQ commands */
 #define LQ_FLAG_COLOR_POS 1
 #define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS)
+#define LQ_FLAG_COLOR_GET(_f) (((_f) & LQ_FLAG_COLOR_MSK) >>\
+			       LQ_FLAG_COLOR_POS)
+#define LQ_FLAGS_COLOR_INC(_c) ((((_c) + 1) << LQ_FLAG_COLOR_POS) &\
+				LQ_FLAG_COLOR_MSK)
+#define LQ_FLAG_COLOR_SET(_f, _c) ((_c) | ((_f) & ~LQ_FLAG_COLOR_MSK))
 
 /* Bit 4-5: Tx RTS BW Signalling
  * (0) No RTS BW signalling

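The LQ_FLAG_COLOR_* macros added above treat bits 1-3 of the LQ command flags as a rolling 3-bit counter, used to match firmware TX responses against the LQ command that was current when the frame was queued. A minimal standalone C sketch (illustration only, not part of the commit; only the macro definitions are copied from the hunk, everything else is assumed) shows the color incrementing and wrapping:

#include <stdio.h>
#include <stdint.h>

/* Macro definitions copied from the hunk above (fw-api-rs.h). */
#define LQ_FLAG_COLOR_POS 1
#define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS)
#define LQ_FLAG_COLOR_GET(_f) (((_f) & LQ_FLAG_COLOR_MSK) >> LQ_FLAG_COLOR_POS)
#define LQ_FLAGS_COLOR_INC(_c) ((((_c) + 1) << LQ_FLAG_COLOR_POS) & LQ_FLAG_COLOR_MSK)
#define LQ_FLAG_COLOR_SET(_f, _c) ((_c) | ((_f) & ~LQ_FLAG_COLOR_MSK))

int main(void)
{
    uint16_t flags = 0;
    int i;

    /* Each new LQ command bumps the color; it wraps back to 0 after 7. */
    for (i = 0; i < 10; i++) {
        uint16_t color = LQ_FLAGS_COLOR_INC(LQ_FLAG_COLOR_GET(flags));

        flags = LQ_FLAG_COLOR_SET(flags, color);
        printf("cmd %d -> color %u\n", i, (unsigned)LQ_FLAG_COLOR_GET(flags));
    }
    return 0;
}

Because a TX response carries the color of the rate table it actually used, a stale response whose color no longer matches the current LQ command can be discarded instead of mis-training the rate scaler.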
@@ -519,8 +519,11 @@ struct agg_tx_status {
  * bit-7 invalid rate indication
  */
 #define TX_RES_INIT_RATE_INDEX_MSK 0x0f
+#define TX_RES_RATE_TABLE_COLOR_POS 4
 #define TX_RES_RATE_TABLE_COLOR_MSK 0x70
 #define TX_RES_INV_RATE_INDEX_MSK 0x80
+#define TX_RES_RATE_TABLE_COL_GET(_f) (((_f) & TX_RES_RATE_TABLE_COLOR_MSK) >>\
+				       TX_RES_RATE_TABLE_COLOR_POS)
 
 #define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
 #define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)

@@ -1002,14 +1002,6 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
 	return 0;
 }
 
-static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm)
-{
-	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
-		iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
-	else
-		iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1);
-}
-
 int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
 {
 	u8 *ptr;
@@ -1023,10 +1015,8 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
 	/* EARLY START - firmware's configuration is hard coded */
 	if ((!mvm->fw->dbg_conf_tlv[conf_id] ||
 	     !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
-	    conf_id == FW_DBG_START_FROM_ALIVE) {
-		iwl_mvm_restart_early_start(mvm);
+	    conf_id == FW_DBG_START_FROM_ALIVE)
 		return 0;
-	}
 
 	if (!mvm->fw->dbg_conf_tlv[conf_id])
 		return -EINVAL;

@@ -1040,7 +1040,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
 		struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6;
 		struct iwl_mac_beacon_cmd_v7 beacon_cmd;
 	} u = {};
-	struct iwl_mac_beacon_cmd beacon_cmd;
+	struct iwl_mac_beacon_cmd beacon_cmd = {};
 	struct ieee80211_tx_info *info;
 	u32 beacon_skb_len;
 	u32 rate, tx_flags;

@@ -1730,8 +1730,11 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
  */
 static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
 {
+	u32 cmd_queue = iwl_mvm_is_dqa_supported(mvm) ? IWL_MVM_DQA_CMD_QUEUE :
+		IWL_MVM_CMD_QUEUE;
+
 	return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) &
-		~BIT(IWL_MVM_CMD_QUEUE));
+		~BIT(cmd_queue));
 }
 
 static inline
@@ -1753,6 +1756,7 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
 	if (!iwl_mvm_has_new_tx_api(mvm))
 		iwl_free_fw_paging(mvm);
 	mvm->ucode_loaded = false;
+	mvm->fw_dbg_conf = FW_DBG_INVALID;
 	iwl_trans_stop_device(mvm->trans);
 }
 
@@ -1149,22 +1149,38 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
 
 	mutex_lock(&mvm->mutex);
 
-	/* stop recording */
 	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+		/* stop recording */
 		iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+
+		iwl_mvm_fw_error_dump(mvm);
+
+		/* start recording again if the firmware is not crashed */
+		if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
+		    mvm->fw->dbg_dest_tlv)
+			iwl_clear_bits_prph(mvm->trans,
+					    MON_BUFF_SAMPLE_CTL, 0x100);
 	} else {
+		u32 in_sample = iwl_read_prph(mvm->trans, DBGC_IN_SAMPLE);
+		u32 out_ctrl = iwl_read_prph(mvm->trans, DBGC_OUT_CTRL);
+
+		/* stop recording */
 		iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
 		/* wait before we collect the data till the DBGC stop */
 		udelay(100);
+		iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0);
+		/* wait before we collect the data till the DBGC stop */
+		udelay(500);
+
+		iwl_mvm_fw_error_dump(mvm);
+
+		/* start recording again if the firmware is not crashed */
+		if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
+		    mvm->fw->dbg_dest_tlv) {
+			iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, in_sample);
+			iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, out_ctrl);
+		}
 	}
 
-	iwl_mvm_fw_error_dump(mvm);
-
-	/* start recording again if the firmware is not crashed */
-	WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) &&
-		     mvm->fw->dbg_dest_tlv &&
-		     iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));
-
 	mutex_unlock(&mvm->mutex);
 
 	iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);

@@ -2,7 +2,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -1083,34 +1083,6 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
 	rs_get_lower_rate_in_column(lq_sta, rate);
 }
 
-/* Check if both rates are identical
- * allow_ant_mismatch enables matching a SISO rate on ANT_A or ANT_B
- * with a rate indicating STBC/BFER and ANT_AB.
- */
-static inline bool rs_rate_equal(struct rs_rate *a,
-				 struct rs_rate *b,
-				 bool allow_ant_mismatch)
-
-{
-	bool ant_match = (a->ant == b->ant) && (a->stbc == b->stbc) &&
-		(a->bfer == b->bfer);
-
-	if (allow_ant_mismatch) {
-		if (a->stbc || a->bfer) {
-			WARN_ONCE(a->ant != ANT_AB, "stbc %d bfer %d ant %d",
-				  a->stbc, a->bfer, a->ant);
-			ant_match |= (b->ant == ANT_A || b->ant == ANT_B);
-		} else if (b->stbc || b->bfer) {
-			WARN_ONCE(b->ant != ANT_AB, "stbc %d bfer %d ant %d",
-				  b->stbc, b->bfer, b->ant);
-			ant_match |= (a->ant == ANT_A || a->ant == ANT_B);
-		}
-	}
-
-	return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) &&
-	       (a->ldpc == b->ldpc) && (a->index == b->index) && ant_match;
-}
-
 /* Check if both rates share the same column */
 static inline bool rs_rate_column_match(struct rs_rate *a,
 					struct rs_rate *b)
@@ -1182,12 +1154,12 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	u32 lq_hwrate;
 	struct rs_rate lq_rate, tx_resp_rate;
 	struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
-	u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
+	u32 tlc_info = (uintptr_t)info->status.status_driver_data[0];
+	u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK;
+	u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
 	u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
-	bool allow_ant_mismatch = fw_has_api(&mvm->fw->ucode_capa,
-					     IWL_UCODE_TLV_API_LQ_SS_PARAMS);
 
 	/* Treat uninitialized rate scaling data same as non-existing. */
 	if (!lq_sta) {
@@ -1262,10 +1234,10 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);
 
 	/* Here we actually compare this rate to the latest LQ command */
-	if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) {
+	if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
 		IWL_DEBUG_RATE(mvm,
-			       "initial tx resp rate 0x%x does not match 0x%x\n",
-			       tx_resp_hwrate, lq_hwrate);
+			       "tx resp color 0x%x does not match 0x%x\n",
+			       lq_color, LQ_FLAG_COLOR_GET(table->flags));
 
 		/*
 		 * Since rates mis-match, the last LQ command may have failed.
@@ -3326,6 +3298,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
 	u8 valid_tx_ant = 0;
 	struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
 	bool toggle_ant = false;
+	u32 color;
 
 	memcpy(&rate, initial_rate, sizeof(rate));
 
@@ -3380,6 +3353,9 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
 			 num_rates, num_retries, valid_tx_ant,
 			 toggle_ant);
 
+	/* update the color of the LQ command (as a counter at bits 1-3) */
+	color = LQ_FLAGS_COLOR_INC(LQ_FLAG_COLOR_GET(lq_cmd->flags));
+	lq_cmd->flags = LQ_FLAG_COLOR_SET(lq_cmd->flags, color);
 }
 
 struct rs_bfer_active_iter_data {

@@ -2,6 +2,7 @@
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -357,6 +358,20 @@ struct iwl_lq_sta {
 	} pers;
 };
 
+/* ieee80211_tx_info's status_driver_data[0] is packed with lq color and txp
+ * Note, it's iwlmvm <-> mac80211 interface.
+ * bits 0-7: reduced tx power
+ * bits 8-10: LQ command's color
+ */
+#define RS_DRV_DATA_TXP_MSK 0xff
+#define RS_DRV_DATA_LQ_COLOR_POS 8
+#define RS_DRV_DATA_LQ_COLOR_MSK (7 << RS_DRV_DATA_LQ_COLOR_POS)
+#define RS_DRV_DATA_LQ_COLOR_GET(_f) (((_f) & RS_DRV_DATA_LQ_COLOR_MSK) >>\
+				      RS_DRV_DATA_LQ_COLOR_POS)
+#define RS_DRV_DATA_PACK(_c, _p) ((void *)(uintptr_t)\
+				  (((uintptr_t)_p) |\
+				   ((_c) << RS_DRV_DATA_LQ_COLOR_POS)))
+
 /* Initialize station's rate scaling information after adding station */
 void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 			  enum nl80211_band band, bool init);

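The packing comment above describes how iwlmvm smuggles both the reduced TX power (bits 0-7) and the LQ color (bits 8-10) through the single pointer-sized status_driver_data[0] slot that mac80211 hands back on TX status. A standalone round-trip sketch (illustration only, not part of the commit; the macros are copied from the hunk, the driver context is assumed):

#include <stdio.h>
#include <stdint.h>

/* Macro definitions copied from the hunk above (rs.h). */
#define RS_DRV_DATA_TXP_MSK 0xff
#define RS_DRV_DATA_LQ_COLOR_POS 8
#define RS_DRV_DATA_LQ_COLOR_MSK (7 << RS_DRV_DATA_LQ_COLOR_POS)
#define RS_DRV_DATA_LQ_COLOR_GET(_f) (((_f) & RS_DRV_DATA_LQ_COLOR_MSK) >> \
                                      RS_DRV_DATA_LQ_COLOR_POS)
#define RS_DRV_DATA_PACK(_c, _p) ((void *)(uintptr_t)\
                                  (((uintptr_t)_p) |\
                                   ((_c) << RS_DRV_DATA_LQ_COLOR_POS)))

int main(void)
{
    uint8_t reduced_txp = 0x2a;   /* bits 0-7 */
    uint8_t lq_color = 5;         /* bits 8-10 */

    /* Pack both values into the one pointer-sized driver-data slot... */
    void *slot = RS_DRV_DATA_PACK(lq_color, reduced_txp);

    /* ...and recover them on the status path. */
    uint32_t tlc_info = (uint32_t)(uintptr_t)slot;

    printf("txp=0x%x color=%u\n",
           (unsigned)(tlc_info & RS_DRV_DATA_TXP_MSK),
           (unsigned)RS_DRV_DATA_LQ_COLOR_GET(tlc_info));
    return 0;
}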
@@ -2120,7 +2120,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	if (!iwl_mvm_is_dqa_supported(mvm))
 		return 0;
 
-	if (WARN_ON(vif->type != NL80211_IFTYPE_AP))
+	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+		    vif->type != NL80211_IFTYPE_ADHOC))
 		return -ENOTSUPP;
 
 	/*
@@ -2155,6 +2156,16 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 		mvmvif->cab_queue = queue;
 	} else if (!fw_has_api(&mvm->fw->ucode_capa,
 			       IWL_UCODE_TLV_API_STA_TYPE)) {
+		/*
+		 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
+		 * invalid, so make sure we use the queue we want.
+		 * Note that this is done here as we want to avoid making DQA
+		 * changes in mac80211 layer.
+		 */
+		if (vif->type == NL80211_IFTYPE_ADHOC) {
+			vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+			mvmvif->cab_queue = vif->cab_queue;
+		}
 		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
 				   &cfg, timeout);
 	}
@@ -3321,18 +3332,15 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
 
 	/* Get the station from the mvm local station table */
 	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
-	if (!mvm_sta) {
-		IWL_ERR(mvm, "Failed to find station\n");
-		return -EINVAL;
-	}
-	sta_id = mvm_sta->sta_id;
+	if (mvm_sta)
+		sta_id = mvm_sta->sta_id;
 
 	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
 		      keyconf->keyidx, sta_id);
 
-	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
-	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
-	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
+	if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
 		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
 
 	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {

@@ -313,6 +313,7 @@ enum iwl_mvm_agg_state {
  *	This is basically (last acked packet++).
  * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
  *	Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
+ * @lq_color: the color of the LQ command as it appears in tx response.
  * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
  * @state: state of the BA agreement establishment / tear down.
  * @txq_id: Tx queue used by the BA session / DQA
@@ -331,6 +332,7 @@ struct iwl_mvm_tid_data {
 	u16 next_reclaimed;
 	/* The rest is Tx AGG related */
 	u32 rate_n_flags;
+	u8 lq_color;
 	bool amsdu_in_ampdu_allowed;
 	enum iwl_mvm_agg_state state;
 	u16 txq_id;

@@ -790,11 +790,13 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
 	struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
 	int ret;
 
-	if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR))
-		return -EIO;
-
 	mutex_lock(&mvm->mutex);
 
+	if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) {
+		ret = -EIO;
+		goto unlock;
+	}
+
 	if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
 		ret = -EINVAL;
 		goto unlock;

@@ -1323,6 +1323,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 	struct iwl_mvm_sta *mvmsta;
 	struct sk_buff_head skbs;
 	u8 skb_freed = 0;
+	u8 lq_color;
 	u16 next_reclaimed, seq_ctl;
 	bool is_ndp = false;
 
@@ -1405,8 +1406,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 			info->status.tx_time =
 				le16_to_cpu(tx_resp->wireless_media_time);
 			BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+			lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
 			info->status.status_driver_data[0] =
-				(void *)(uintptr_t)tx_resp->reduced_tpc;
+				RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc);
 
 			ieee80211_tx_status(mvm->hw, skb);
 		}
@@ -1638,6 +1640,9 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
 			le32_to_cpu(tx_resp->initial_rate);
 		mvmsta->tid_data[tid].tx_time =
 			le16_to_cpu(tx_resp->wireless_media_time);
+		mvmsta->tid_data[tid].lq_color =
+			(tx_resp->tlc_info & TX_RES_RATE_TABLE_COLOR_MSK) >>
+			TX_RES_RATE_TABLE_COLOR_POS;
 	}
 
 	rcu_read_unlock();
@@ -1707,6 +1712,11 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
 	iwl_mvm_check_ratid_empty(mvm, sta, tid);
 
 	freed = 0;
+
+	/* pack lq color from tid_data along the reduced txp */
+	ba_info->status.status_driver_data[0] =
+		RS_DRV_DATA_PACK(tid_data->lq_color,
+				 ba_info->status.status_driver_data[0]);
 	ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
 
 	skb_queue_walk(&reclaimed_skbs, skb) {

@@ -2803,7 +2803,8 @@ static struct iwl_trans_dump_data
 #ifdef CONFIG_PM_SLEEP
 static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
 {
-	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
+	    (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
 		return iwl_pci_fw_enter_d0i3(trans);
 
 	return 0;
@@ -2811,7 +2812,8 @@ static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
 
 static void iwl_trans_pcie_resume(struct iwl_trans *trans)
 {
-	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
+	    (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
 		iwl_pci_fw_exit_d0i3(trans);
 }
 #endif /* CONFIG_PM_SLEEP */

@@ -906,7 +906,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
 
 	if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) {
 		ret = -EINVAL;
-		goto error;
+		goto error_free_resp;
 	}
 
 	rsp = (void *)hcmd.resp_pkt->data;
@@ -915,13 +915,13 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
 	if (qid > ARRAY_SIZE(trans_pcie->txq)) {
 		WARN_ONCE(1, "queue index %d unsupported", qid);
 		ret = -EIO;
-		goto error;
+		goto error_free_resp;
 	}
 
 	if (test_and_set_bit(qid, trans_pcie->queue_used)) {
 		WARN_ONCE(1, "queue %d already used", qid);
 		ret = -EIO;
-		goto error;
+		goto error_free_resp;
 	}
 
 	txq->id = qid;
@@ -934,8 +934,11 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
 		       (txq->write_ptr) | (qid << 16));
 	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
 
+	iwl_free_resp(&hcmd);
 	return qid;
 
+error_free_resp:
+	iwl_free_resp(&hcmd);
 error:
 	iwl_pcie_gen2_txq_free_memory(trans, txq);
 	return ret;

@@ -470,6 +470,7 @@ struct mlx4_update_qp_params {
 	u16 rate_val;
 };
 
+struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn);
 int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
 		   enum mlx4_update_qp_attr attr,
 		   struct mlx4_update_qp_params *params);

@@ -1007,6 +1007,7 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row,
  */
 extern const struct proto_ops inet6_stream_ops;
 extern const struct proto_ops inet6_dgram_ops;
+extern const struct proto_ops inet6_sockraw_ops;
 
 struct group_source_req;
 struct group_filter;

@@ -924,7 +924,7 @@ struct tcp_congestion_ops {
 	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
 	/* call when ack arrives (optional) */
 	void (*in_ack_event)(struct sock *sk, u32 flags);
-	/* new value of cwnd after loss (optional) */
+	/* new value of cwnd after loss (required) */
 	u32 (*undo_cwnd)(struct sock *sk);
 	/* hook for packet ack accounting (optional) */
 	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);

@@ -595,7 +595,7 @@ static int br_afspec(struct net_bridge *br,
 		err = 0;
 		switch (nla_type(attr)) {
 		case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
-			if (!(p->flags & BR_VLAN_TUNNEL))
+			if (!p || !(p->flags & BR_VLAN_TUNNEL))
 				return -EINVAL;
 			err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
 			if (err)

@@ -179,7 +179,8 @@ static void br_stp_start(struct net_bridge *br)
 		br_debug(br, "using kernel STP\n");
 
 		/* To start timers on any ports left in blocking */
-		mod_timer(&br->hello_timer, jiffies + br->hello_time);
+		if (br->dev->flags & IFF_UP)
+			mod_timer(&br->hello_timer, jiffies + br->hello_time);
 		br_port_state_selection(br);
 	}
 
@@ -1680,8 +1680,10 @@ start_again:
 
 	hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
 			  &devlink_nl_family, NLM_F_MULTI, cmd);
-	if (!hdr)
+	if (!hdr) {
+		nlmsg_free(skb);
 		return -EMSGSIZE;
+	}
 
 	if (devlink_nl_put_handle(skb, devlink))
 		goto nla_put_failure;
@@ -2098,8 +2100,10 @@ start_again:
 
 	hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
 			  &devlink_nl_family, NLM_F_MULTI, cmd);
-	if (!hdr)
+	if (!hdr) {
+		nlmsg_free(skb);
 		return -EMSGSIZE;
+	}
 
 	if (devlink_nl_put_handle(skb, devlink))
 		goto nla_put_failure;

@@ -3754,8 +3754,11 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
 
 	spin_lock_irqsave(&q->lock, flags);
 	skb = __skb_dequeue(q);
-	if (skb && (skb_next = skb_peek(q)))
+	if (skb && (skb_next = skb_peek(q))) {
 		icmp_next = is_icmp_err_skb(skb_next);
+		if (icmp_next)
+			sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
+	}
 	spin_unlock_irqrestore(&q->lock, flags);
 
 	if (is_icmp_err_skb(skb) && !icmp_next)

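The fix above makes sock_dequeue_err_skb() pre-load sk->sk_err from the next queued ICMP error while still holding the queue lock, so ping-style applications that poll sk_err between MSG_ERRQUEUE reads never see a window where a still-queued error looks like "no error". A toy model of the pattern (plain C, illustration only; kernel types reduced to a hand-rolled list):

#include <stdio.h>
#include <stddef.h>

struct err_skb {
    int icmp_origin;            /* nonzero if this is an ICMP-sourced error */
    struct err_skb *next;
};

struct toy_sock {
    int sk_err;
    struct err_skb *errq;
};

static struct err_skb *toy_dequeue_err(struct toy_sock *sk)
{
    struct err_skb *skb = sk->errq;             /* __skb_dequeue() */

    if (skb) {
        struct err_skb *skb_next = skb->next;   /* skb_peek() */

        sk->errq = skb_next;
        /* Propagate the next pending error before "unlocking". */
        if (skb_next && skb_next->icmp_origin)
            sk->sk_err = skb_next->icmp_origin;
    }
    return skb;
}

int main(void)
{
    struct err_skb e2 = { .icmp_origin = 111, .next = NULL };
    struct err_skb e1 = { .icmp_origin = 113, .next = &e2 };
    struct toy_sock sk = { .sk_err = 113, .errq = &e1 };

    toy_dequeue_err(&sk);
    printf("sk_err after first dequeue: %d\n", sk.sk_err);  /* 111 */
    return 0;
}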
@@ -223,6 +223,53 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+int dsa_switch_suspend(struct dsa_switch *ds)
+{
+	int i, ret = 0;
+
+	/* Suspend slave network devices */
+	for (i = 0; i < ds->num_ports; i++) {
+		if (!dsa_is_port_initialized(ds, i))
+			continue;
+
+		ret = dsa_slave_suspend(ds->ports[i].netdev);
+		if (ret)
+			return ret;
+	}
+
+	if (ds->ops->suspend)
+		ret = ds->ops->suspend(ds);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dsa_switch_suspend);
+
+int dsa_switch_resume(struct dsa_switch *ds)
+{
+	int i, ret = 0;
+
+	if (ds->ops->resume)
+		ret = ds->ops->resume(ds);
+
+	if (ret)
+		return ret;
+
+	/* Resume slave network devices */
+	for (i = 0; i < ds->num_ports; i++) {
+		if (!dsa_is_port_initialized(ds, i))
+			continue;
+
+		ret = dsa_slave_resume(ds->ports[i].netdev);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dsa_switch_resume);
+#endif
+
 static struct packet_type dsa_pack_type __read_mostly = {
 	.type = cpu_to_be16(ETH_P_XDSA),
 	.func = dsa_switch_rcv,

@@ -484,8 +484,10 @@ static void dsa_dst_unapply(struct dsa_switch_tree *dst)
 		dsa_ds_unapply(dst, ds);
 	}
 
-	if (dst->cpu_switch)
+	if (dst->cpu_switch) {
 		dsa_cpu_port_ethtool_restore(dst->cpu_switch);
+		dst->cpu_switch = NULL;
+	}
 
 	pr_info("DSA: tree %d unapplied\n", dst->tree);
 	dst->applied = false;

@@ -289,53 +289,6 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
 	dsa_switch_unregister_notifier(ds);
 }
 
-#ifdef CONFIG_PM_SLEEP
-int dsa_switch_suspend(struct dsa_switch *ds)
-{
-	int i, ret = 0;
-
-	/* Suspend slave network devices */
-	for (i = 0; i < ds->num_ports; i++) {
-		if (!dsa_is_port_initialized(ds, i))
-			continue;
-
-		ret = dsa_slave_suspend(ds->ports[i].netdev);
-		if (ret)
-			return ret;
-	}
-
-	if (ds->ops->suspend)
-		ret = ds->ops->suspend(ds);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(dsa_switch_suspend);
-
-int dsa_switch_resume(struct dsa_switch *ds)
-{
-	int i, ret = 0;
-
-	if (ds->ops->resume)
-		ret = ds->ops->resume(ds);
-
-	if (ret)
-		return ret;
-
-	/* Resume slave network devices */
-	for (i = 0; i < ds->num_ports; i++) {
-		if (!dsa_is_port_initialized(ds, i))
-			continue;
-
-		ret = dsa_slave_resume(ds->ports[i].netdev);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(dsa_switch_resume);
-#endif
-
 /* platform driver init and cleanup *****************************************/
 static int dev_is_class(struct device *dev, void *class)
 {

@@ -1043,7 +1043,7 @@ static struct inet_protosw inetsw_array[] =
 		.type = SOCK_DGRAM,
 		.protocol = IPPROTO_ICMP,
 		.prot = &ping_prot,
-		.ops = &inet_dgram_ops,
+		.ops = &inet_sockraw_ops,
 		.flags = INET_PROTOSW_REUSE,
 	},
 
@@ -2381,9 +2381,10 @@ static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int l
 	return 0;
 }
 
-static int tcp_repair_options_est(struct tcp_sock *tp,
+static int tcp_repair_options_est(struct sock *sk,
 		struct tcp_repair_opt __user *optbuf, unsigned int len)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_repair_opt opt;
 
 	while (len >= sizeof(opt)) {
@@ -2396,6 +2397,7 @@ static int tcp_repair_options_est(struct tcp_sock *tp,
 		switch (opt.opt_code) {
 		case TCPOPT_MSS:
 			tp->rx_opt.mss_clamp = opt.opt_val;
+			tcp_mtup_init(sk);
 			break;
 		case TCPOPT_WINDOW:
 		{
@@ -2555,7 +2557,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		if (!tp->repair)
 			err = -EINVAL;
 		else if (sk->sk_state == TCP_ESTABLISHED)
-			err = tcp_repair_options_est(tp,
+			err = tcp_repair_options_est(sk,
 					(struct tcp_repair_opt __user *)optval,
 					optlen);
 		else

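With the helper now taking the socket rather than the tcp_sock, replaying the MSS option during restore also re-runs MTU-probing setup via tcp_mtup_init(sk). From userspace this path is reached through the TCP_REPAIR setsockopt interface. A hedged sketch of the calls involved (illustration only, not from the commit; a real checkpoint/restore tool first brings the socket to TCP_ESTABLISHED in repair mode, so on a fresh socket the second call is expected to fail):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>      /* TCP_REPAIR, TCP_REPAIR_OPTIONS, struct tcp_repair_opt */

#ifndef TCPOPT_MSS
#define TCPOPT_MSS 2        /* TCP option kind for MSS; not exported in uapi */
#endif

int main(void)
{
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    int on = 1;
    struct tcp_repair_opt opt = { .opt_code = TCPOPT_MSS, .opt_val = 1400 };

    /* Enter repair mode (requires CAP_NET_ADMIN). */
    if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on)) < 0)
        perror("TCP_REPAIR");
    /* Replay the saved MSS option; the kernel clamps MSS and, after this
     * fix, re-initializes MTU probing state for the restored socket. */
    else if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_OPTIONS,
                        &opt, sizeof(opt)) < 0)
        perror("TCP_REPAIR_OPTIONS (socket must be established)");
    return 0;
}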
@@ -180,6 +180,7 @@ void tcp_init_congestion_control(struct sock *sk)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 
+	tcp_sk(sk)->prior_ssthresh = 0;
 	if (icsk->icsk_ca_ops->init)
 		icsk->icsk_ca_ops->init(sk);
 	if (tcp_ca_needs_ecn(sk))

@@ -1319,7 +1319,7 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
 	struct ipv6hdr *ip6_hdr;
 	struct ipv6_opt_hdr *hop;
 	unsigned char buf[CALIPSO_MAX_BUFFER];
-	int len_delta, new_end, pad;
+	int len_delta, new_end, pad, payload;
 	unsigned int start, end;
 
 	ip6_hdr = ipv6_hdr(skb);
@@ -1346,6 +1346,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
 	if (ret_val < 0)
 		return ret_val;
 
+	ip6_hdr = ipv6_hdr(skb); /* Reset as skb_cow() may have moved it */
+
 	if (len_delta) {
 		if (len_delta > 0)
 			skb_push(skb, len_delta);
@@ -1355,6 +1357,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
 			       sizeof(*ip6_hdr) + start);
 		skb_reset_network_header(skb);
 		ip6_hdr = ipv6_hdr(skb);
+		payload = ntohs(ip6_hdr->payload_len);
+		ip6_hdr->payload_len = htons(payload + len_delta);
 	}
 
 	hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1);

@@ -116,8 +116,10 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
 
 	if (udpfrag) {
 		int err = ip6_find_1stfragopt(skb, &prevhdr);
-		if (err < 0)
+		if (err < 0) {
+			kfree_skb_list(segs);
 			return ERR_PTR(err);
+		}
 		fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
 		fptr->frag_off = htons(offset);
 		if (skb->next)

@@ -1095,6 +1095,9 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
 
 	if (!dst) {
 route_lookup:
+		/* add dsfield to flowlabel for route lookup */
+		fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);
+
 		dst = ip6_route_output(net, NULL, fl6);
 
 		if (dst->error)

@@ -192,7 +192,7 @@ static struct inet_protosw pingv6_protosw = {
 	.type = SOCK_DGRAM,
 	.protocol = IPPROTO_ICMPV6,
 	.prot = &pingv6_prot,
-	.ops = &inet6_dgram_ops,
+	.ops = &inet6_sockraw_ops,
 	.flags = INET_PROTOSW_REUSE,
 };
 
@@ -1338,7 +1338,7 @@ void raw6_proc_exit(void)
 #endif /* CONFIG_PROC_FS */
 
 /* Same as inet6_dgram_ops, sans udp_poll. */
-static const struct proto_ops inet6_sockraw_ops = {
+const struct proto_ops inet6_sockraw_ops = {
 	.family = PF_INET6,
 	.owner = THIS_MODULE,
 	.release = inet6_release,

@@ -47,6 +47,8 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
 	iph = ipv6_hdr(skb);
 
 	hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+	if (hdr_len < 0)
+		return hdr_len;
 	skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
 	skb_set_network_header(skb, -x->props.header_len);
 	skb->transport_header = skb->network_header + hdr_len;

@@ -30,6 +30,8 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
 	skb_set_inner_transport_header(skb, skb_transport_offset(skb));
 
 	hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+	if (hdr_len < 0)
+		return hdr_len;
 	skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
 	skb_set_network_header(skb, -x->props.header_len);
 	skb->transport_header = skb->network_header + hdr_len;

@@ -7,7 +7,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -741,7 +741,47 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
 	ieee80211_agg_start_txq(sta, tid, true);
 }
 
-void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
+void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
+			      struct tid_ampdu_tx *tid_tx)
+{
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	struct ieee80211_local *local = sdata->local;
+
+	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
+		return;
+
+	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
+		ieee80211_agg_tx_operational(local, sta, tid);
+}
+
+static struct tid_ampdu_tx *
+ieee80211_lookup_tid_tx(struct ieee80211_sub_if_data *sdata,
+			const u8 *ra, u16 tid, struct sta_info **sta)
+{
+	struct tid_ampdu_tx *tid_tx;
+
+	if (tid >= IEEE80211_NUM_TIDS) {
+		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
+		       tid, IEEE80211_NUM_TIDS);
+		return NULL;
+	}
+
+	*sta = sta_info_get_bss(sdata, ra);
+	if (!*sta) {
+		ht_dbg(sdata, "Could not find station: %pM\n", ra);
+		return NULL;
+	}
+
+	tid_tx = rcu_dereference((*sta)->ampdu_mlme.tid_tx[tid]);
+
+	if (WARN_ON(!tid_tx))
+		ht_dbg(sdata, "addBA was not requested!\n");
+
+	return tid_tx;
+}
+
+void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
+				      const u8 *ra, u16 tid)
 {
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 	struct ieee80211_local *local = sdata->local;
@@ -750,57 +790,15 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
 
 	trace_api_start_tx_ba_cb(sdata, ra, tid);
 
-	if (tid >= IEEE80211_NUM_TIDS) {
-		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
-		       tid, IEEE80211_NUM_TIDS);
-		return;
-	}
+	rcu_read_lock();
+	tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
+	if (!tid_tx)
+		goto out;
 
-	mutex_lock(&local->sta_mtx);
-	sta = sta_info_get_bss(sdata, ra);
-	if (!sta) {
-		mutex_unlock(&local->sta_mtx);
-		ht_dbg(sdata, "Could not find station: %pM\n", ra);
-		return;
-	}
-
-	mutex_lock(&sta->ampdu_mlme.mtx);
-	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
-
-	if (WARN_ON(!tid_tx)) {
-		ht_dbg(sdata, "addBA was not requested!\n");
-		goto unlock;
-	}
-
-	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
-		goto unlock;
-
-	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
-		ieee80211_agg_tx_operational(local, sta, tid);
-
- unlock:
-	mutex_unlock(&sta->ampdu_mlme.mtx);
-	mutex_unlock(&local->sta_mtx);
-}
-
-void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
-				      const u8 *ra, u16 tid)
-{
-	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
-	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_ra_tid *ra_tid;
-	struct sk_buff *skb = dev_alloc_skb(0);
-
-	if (unlikely(!skb))
-		return;
-
-	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
-	memcpy(&ra_tid->ra, ra, ETH_ALEN);
-	ra_tid->tid = tid;
-
-	skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
-	skb_queue_tail(&sdata->skb_queue, skb);
-	ieee80211_queue_work(&local->hw, &sdata->work);
+	set_bit(HT_AGG_STATE_START_CB, &tid_tx->state);
+	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+out:
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
 
@@ -860,37 +858,18 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
 }
 EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
 
-void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
+void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
+			     struct tid_ampdu_tx *tid_tx)
 {
-	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
-	struct ieee80211_local *local = sdata->local;
-	struct sta_info *sta;
-	struct tid_ampdu_tx *tid_tx;
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	bool send_delba = false;
 
-	trace_api_stop_tx_ba_cb(sdata, ra, tid);
+	ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n",
+	       sta->sta.addr, tid);
 
-	if (tid >= IEEE80211_NUM_TIDS) {
-		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
-		       tid, IEEE80211_NUM_TIDS);
-		return;
-	}
-
-	ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", ra, tid);
-
-	mutex_lock(&local->sta_mtx);
-
-	sta = sta_info_get_bss(sdata, ra);
-	if (!sta) {
-		ht_dbg(sdata, "Could not find station: %pM\n", ra);
-		goto unlock;
-	}
-
-	mutex_lock(&sta->ampdu_mlme.mtx);
 	spin_lock_bh(&sta->lock);
 	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
-	if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+	if (!test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
 		ht_dbg(sdata,
 		       "unexpected callback to A-MPDU stop for %pM tid %d\n",
 		       sta->sta.addr, tid);
@@ -906,12 +885,8 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
 	spin_unlock_bh(&sta->lock);
 
 	if (send_delba)
-		ieee80211_send_delba(sdata, ra, tid,
+		ieee80211_send_delba(sdata, sta->sta.addr, tid,
 				     WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
-
-	mutex_unlock(&sta->ampdu_mlme.mtx);
- unlock:
-	mutex_unlock(&local->sta_mtx);
 }
 
 void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
@@ -919,19 +894,20 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
 {
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_ra_tid *ra_tid;
-	struct sk_buff *skb = dev_alloc_skb(0);
+	struct sta_info *sta;
+	struct tid_ampdu_tx *tid_tx;
 
-	if (unlikely(!skb))
-		return;
+	trace_api_stop_tx_ba_cb(sdata, ra, tid);
 
-	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
-	memcpy(&ra_tid->ra, ra, ETH_ALEN);
-	ra_tid->tid = tid;
+	rcu_read_lock();
+	tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
+	if (!tid_tx)
+		goto out;
 
-	skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP;
-	skb_queue_tail(&sdata->skb_queue, skb);
-	ieee80211_queue_work(&local->hw, &sdata->work);
+	set_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state);
+	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+out:
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
 
@@ -7,6 +7,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
+ * Copyright 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
@@ -289,8 +290,6 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
 {
 	int i;
 
-	cancel_work_sync(&sta->ampdu_mlme.work);
-
 	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
 		__ieee80211_stop_tx_ba_session(sta, i, reason);
 		__ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
@@ -298,6 +297,9 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
 				   reason != AGG_STOP_DESTROY_STA &&
 				   reason != AGG_STOP_PEER_REQUEST);
 	}
+
+	/* stopping might queue the work again - so cancel only afterwards */
+	cancel_work_sync(&sta->ampdu_mlme.work);
 }
 
 void ieee80211_ba_session_work(struct work_struct *work)
@@ -352,10 +354,16 @@ void ieee80211_ba_session_work(struct work_struct *work)
 		spin_unlock_bh(&sta->lock);
 
 		tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
-		if (tid_tx && test_and_clear_bit(HT_AGG_STATE_WANT_STOP,
-						 &tid_tx->state))
+		if (!tid_tx)
+			continue;
+
+		if (test_and_clear_bit(HT_AGG_STATE_START_CB, &tid_tx->state))
+			ieee80211_start_tx_ba_cb(sta, tid, tid_tx);
+		if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state))
 			___ieee80211_stop_tx_ba_session(sta, tid,
 							AGG_STOP_LOCAL_REQUEST);
+		if (test_and_clear_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state))
+			ieee80211_stop_tx_ba_cb(sta, tid, tid_tx);
 	}
 	mutex_unlock(&sta->ampdu_mlme.mtx);
 }

@@ -1036,8 +1036,6 @@ struct ieee80211_rx_agg {
 
 enum sdata_queue_type {
 	IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0,
-	IEEE80211_SDATA_QUEUE_AGG_START = 1,
-	IEEE80211_SDATA_QUEUE_AGG_STOP = 2,
 	IEEE80211_SDATA_QUEUE_RX_AGG_START = 3,
 	IEEE80211_SDATA_QUEUE_RX_AGG_STOP = 4,
 };
@@ -1427,12 +1425,6 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
 	return local->hw.wiphy->bands[band];
 }
 
-/* this struct represents 802.11n's RA/TID combination */
-struct ieee80211_ra_tid {
-	u8 ra[ETH_ALEN];
-	u16 tid;
-};
-
 /* this struct holds the value parsing from channel switch IE */
 struct ieee80211_csa_ie {
 	struct cfg80211_chan_def chandef;
@@ -1794,8 +1786,10 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 			       enum ieee80211_agg_stop_reason reason);
 int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 				    enum ieee80211_agg_stop_reason reason);
-void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid);
-void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid);
+void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
+			      struct tid_ampdu_tx *tid_tx);
+void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
+			     struct tid_ampdu_tx *tid_tx);
 void ieee80211_ba_session_work(struct work_struct *work);
 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);

@@ -1237,7 +1237,6 @@ static void ieee80211_iface_work(struct work_struct *work)
 	struct ieee80211_local *local = sdata->local;
 	struct sk_buff *skb;
 	struct sta_info *sta;
-	struct ieee80211_ra_tid *ra_tid;
 	struct ieee80211_rx_agg *rx_agg;
 
 	if (!ieee80211_sdata_running(sdata))
@@ -1253,15 +1252,7 @@ static void ieee80211_iface_work(struct work_struct *work)
 	while ((skb = skb_dequeue(&sdata->skb_queue))) {
 		struct ieee80211_mgmt *mgmt = (void *)skb->data;
 
-		if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) {
-			ra_tid = (void *)&skb->cb;
-			ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra,
-						 ra_tid->tid);
-		} else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) {
-			ra_tid = (void *)&skb->cb;
-			ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra,
-						ra_tid->tid);
-		} else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
+		if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
 			rx_agg = (void *)&skb->cb;
 			mutex_lock(&local->sta_mtx);
 			sta = sta_info_get_bss(sdata, rx_agg->addr);

@@ -2155,7 +2155,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 			struct ieee80211_sta_rx_stats *cpurxs;
 
 			cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
-			sinfo->rx_packets += cpurxs->dropped;
+			sinfo->rx_dropped_misc += cpurxs->dropped;
 		}
 	}
 
@@ -116,6 +116,8 @@ enum ieee80211_sta_info_flags {
 #define HT_AGG_STATE_STOPPING 3
 #define HT_AGG_STATE_WANT_START 4
 #define HT_AGG_STATE_WANT_STOP 5
+#define HT_AGG_STATE_START_CB 6
+#define HT_AGG_STATE_STOP_CB 7
 
 enum ieee80211_agg_stop_reason {
 	AGG_STOP_DECLINED,

@@ -1418,7 +1418,7 @@ static void mpls_ifup(struct net_device *dev, unsigned int flags)
 			continue;
 		alive++;
 		nh_flags &= ~flags;
-		WRITE_ONCE(nh->nh_flags, flags);
+		WRITE_ONCE(nh->nh_flags, nh_flags);
 	} endfor_nexthops(rt);
 
 	WRITE_ONCE(rt->rt_nhn_alive, alive);

@@ -890,8 +890,13 @@ restart:
 	}
 out:
 	local_bh_enable();
-	if (last)
+	if (last) {
+		/* nf ct hash resize happened, now clear the leftover. */
+		if ((struct nf_conn *)cb->args[1] == last)
+			cb->args[1] = 0;
+
 		nf_ct_put(last);
+	}
 
 	while (i) {
 		i--;

@@ -512,16 +512,19 @@ static int sctp_error(struct net *net, struct nf_conn *tpl, struct sk_buff *skb,
 			u8 pf, unsigned int hooknum)
 {
 	const struct sctphdr *sh;
-	struct sctphdr _sctph;
 	const char *logmsg;
 
-	sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
-	if (!sh) {
+	if (skb->len < dataoff + sizeof(struct sctphdr)) {
 		logmsg = "nf_ct_sctp: short packet ";
 		goto out_invalid;
 	}
 	if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
 	    skb->ip_summed == CHECKSUM_NONE) {
+		if (!skb_make_writable(skb, dataoff + sizeof(struct sctphdr))) {
+			logmsg = "nf_ct_sctp: failed to read header ";
+			goto out_invalid;
+		}
+		sh = (const struct sctphdr *)(skb->data + dataoff);
 		if (sh->checksum != sctp_compute_cksum(skb, dataoff)) {
 			logmsg = "nf_ct_sctp: bad CRC ";
 			goto out_invalid;

@@ -566,7 +566,7 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
 	 * Else, when the conntrack is destoyed, nf_nat_cleanup_conntrack()
 	 * will delete entry from already-freed table.
 	 */
-	ct->status &= ~IPS_NAT_DONE_MASK;
+	clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
 	rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
 			nf_nat_bysource_params);
 
@@ -116,17 +116,17 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 		else if (d > 0)
 			p = &parent->rb_right;
 		else {
-			if (nft_set_elem_active(&rbe->ext, genmask)) {
-				if (nft_rbtree_interval_end(rbe) &&
-				    !nft_rbtree_interval_end(new))
-					p = &parent->rb_left;
-				else if (!nft_rbtree_interval_end(rbe) &&
-					 nft_rbtree_interval_end(new))
-					p = &parent->rb_right;
-				else {
-					*ext = &rbe->ext;
-					return -EEXIST;
-				}
+			if (nft_rbtree_interval_end(rbe) &&
+			    !nft_rbtree_interval_end(new)) {
+				p = &parent->rb_left;
+			} else if (!nft_rbtree_interval_end(rbe) &&
+				   nft_rbtree_interval_end(new)) {
+				p = &parent->rb_right;
+			} else if (nft_set_elem_active(&rbe->ext, genmask)) {
+				*ext = &rbe->ext;
+				return -EEXIST;
+			} else {
+				p = &parent->rb_left;
 			}
 		}

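The reordering above applies the interval start/end ordering before the generation-mask check: an expired element with the same key and kind is now walked past (to the left) instead of short-circuiting the insertion, and only a live element of the same kind reports -EEXIST. The resulting decision table, as a standalone sketch (illustration only; the kernel helpers are reduced to plain flags):

#include <stdio.h>

/* Decision for equal keys, mirroring the reordered branch above:
 * -1 = descend left, +1 = descend right, 0 = report -EEXIST. */
static int equal_key_decision(int existing_is_end, int new_is_end,
                              int existing_is_active)
{
    if (existing_is_end && !new_is_end)
        return -1;      /* a new interval start sorts before the end */
    if (!existing_is_end && new_is_end)
        return 1;       /* a new interval end sorts after the start */
    if (existing_is_active)
        return 0;       /* same kind and live: genuine duplicate */
    return -1;          /* same kind but inactive: keep walking */
}

int main(void)
{
    /* The case the old code mishandled: an inactive element of the
     * same kind no longer bypasses the end/start ordering. */
    printf("end vs start, inactive: %d\n", equal_key_decision(1, 0, 0));
    printf("same kind, active:      %d\n", equal_key_decision(0, 0, 1));
    printf("same kind, inactive:    %d\n", equal_key_decision(0, 0, 0));
    return 0;
}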
@@ -62,6 +62,7 @@
 #include <asm/cacheflush.h>
 #include <linux/hash.h>
 #include <linux/genetlink.h>
+#include <linux/net_namespace.h>
 
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -1415,7 +1416,8 @@ static void do_one_broadcast(struct sock *sk,
 		goto out;
 	}
 	NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
-	NETLINK_CB(p->skb2).nsid_is_set = true;
+	if (NETLINK_CB(p->skb2).nsid != NETNSA_NSID_NOT_ASSIGNED)
+		NETLINK_CB(p->skb2).nsid_is_set = true;
 	val = netlink_broadcast_deliver(sk, p->skb2);
 	if (val < 0) {
 		netlink_overrun(sk);