Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

No conflicts.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

commit dc2e0617f1
@@ -106,6 +106,12 @@ properties:
   phy-mode:
     $ref: "#/properties/phy-connection-type"
 
+  pcs-handle:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description:
+      Specifies a reference to a node representing a PCS PHY device on a MDIO
+      bus to link with an external PHY (phy-handle) if exists.
+
   phy-handle:
     $ref: /schemas/types.yaml#/definitions/phandle
     description:
@@ -45,20 +45,3 @@ Optional properties:
 
    In fiber mode, auto-negotiation is disabled and the PHY can only work in
    100base-fx (full and half duplex) modes.
-
- - lan8814,ignore-ts: If present the PHY will not support timestamping.
-
-        This option acts as check whether Timestamping is supported by
-        hardware or not. LAN8814 phy support hardware tmestamping.
-
- - lan8814,latency_rx_10: Configures Latency value of phy in ingress at 10 Mbps.
-
- - lan8814,latency_tx_10: Configures Latency value of phy in egress at 10 Mbps.
-
- - lan8814,latency_rx_100: Configures Latency value of phy in ingress at 100 Mbps.
-
- - lan8814,latency_tx_100: Configures Latency value of phy in egress at 100 Mbps.
-
- - lan8814,latency_rx_1000: Configures Latency value of phy in ingress at 1000 Mbps.
-
- - lan8814,latency_tx_1000: Configures Latency value of phy in egress at 1000 Mbps.
@@ -26,7 +26,8 @@ Required properties:
                   specified, the TX/RX DMA interrupts should be on that node
                   instead, and only the Ethernet core interrupt is optionally
                   specified here.
-- phy-handle    : Should point to the external phy device.
+- phy-handle    : Should point to the external phy device if exists. Pointing
+                  this to the PCS/PMA PHY is deprecated and should be avoided.
                   See ethernet.txt file in the same directory.
 - xlnx,rxmem    : Set to allocated memory buffer for Rx/Tx in the hardware
 
@@ -68,6 +69,11 @@ Optional properties:
                   required through the core's MDIO interface (i.e. always,
                   unless the PHY is accessed through a different bus).
 
+ - pcs-handle:    Phandle to the internal PCS/PMA PHY in SGMII or 1000Base-X
+                  modes, where "pcs-handle" should be used to point
+                  to the PCS/PMA PHY, and "phy-handle" should point to an
+                  external PHY if exists.
+
 Example:
         axi_ethernet_eth: ethernet@40c00000 {
                 compatible = "xlnx,axi-ethernet-1.00.a";
@@ -10,21 +10,21 @@ in joining the effort.
 Design principles
 =================
 
-The Distributed Switch Architecture is a subsystem which was primarily designed
-to support Marvell Ethernet switches (MV88E6xxx, a.k.a Linkstreet product line)
-using Linux, but has since evolved to support other vendors as well.
+The Distributed Switch Architecture subsystem was primarily designed to
+support Marvell Ethernet switches (MV88E6xxx, a.k.a. Link Street product
+line) using Linux, but has since evolved to support other vendors as well.
 
 The original philosophy behind this design was to be able to use unmodified
 Linux tools such as bridge, iproute2, ifconfig to work transparently whether
 they configured/queried a switch port network device or a regular network
 device.
 
-An Ethernet switch is typically comprised of multiple front-panel ports, and one
-or more CPU or management port. The DSA subsystem currently relies on the
+An Ethernet switch typically comprises multiple front-panel ports and one
+or more CPU or management ports. The DSA subsystem currently relies on the
 presence of a management port connected to an Ethernet controller capable of
 receiving Ethernet frames from the switch. This is a very common setup for all
 kinds of Ethernet switches found in Small Home and Office products: routers,
-gateways, or even top-of-the rack switches. This host Ethernet controller will
+gateways, or even top-of-rack switches. This host Ethernet controller will
 be later referred to as "master" and "cpu" in DSA terminology and code.
 
 The D in DSA stands for Distributed, because the subsystem has been designed
@@ -33,14 +33,14 @@ using upstream and downstream Ethernet links between switches. These specific
 ports are referred to as "dsa" ports in DSA terminology and code. A collection
 of multiple switches connected to each other is called a "switch tree".
 
-For each front-panel port, DSA will create specialized network devices which are
+For each front-panel port, DSA creates specialized network devices which are
 used as controlling and data-flowing endpoints for use by the Linux networking
 stack. These specialized network interfaces are referred to as "slave" network
 interfaces in DSA terminology and code.
 
 The ideal case for using DSA is when an Ethernet switch supports a "switch tag"
 which is a hardware feature making the switch insert a specific tag for each
-Ethernet frames it received to/from specific ports to help the management
+Ethernet frame it receives to/from specific ports to help the management
 interface figure out:
 
 - what port is this frame coming from
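[Editor's note] The "switch tag" idea in the hunk above can be illustrated with a small standalone sketch. This is not any real tagger from net/dsa/tag_*.c; the 4-byte layout, the 0xD5 marker and the tag_insert() helper are invented for illustration only.

/*
 * Minimal userspace illustration of the "switch tag" concept: a
 * hypothetical 4-byte tag inserted after the MAC addresses, carrying
 * the source port. Real taggers (net/dsa/tag_*.c) differ in layout.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define TAG_LEN 4

/* Insert a tag encoding @port after the 12 MAC-address bytes. */
static size_t tag_insert(uint8_t *frame, size_t len, uint8_t port)
{
        memmove(frame + 12 + TAG_LEN, frame + 12, len - 12);
        frame[12] = 0xD5;       /* hypothetical tag marker */
        frame[13] = port;       /* source/destination port */
        frame[14] = 0;
        frame[15] = 0;
        return len + TAG_LEN;
}

int main(void)
{
        uint8_t frame[64] = { /* dst MAC */ 1, 2, 3, 4, 5, 6,
                              /* src MAC */ 7, 8, 9, 10, 11, 12,
                              /* EtherType */ 0x08, 0x00 };
        size_t len = tag_insert(frame, 14, 3);

        printf("tagged frame is %zu bytes, port %u\n", len, frame[13]);
        return 0;
}

A real tagging driver does the same job inside the kernel's sk_buff machinery, and the receive side parses the tag to demultiplex frames to the right slave interface.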
@@ -125,7 +125,7 @@ other switches from the same fabric, and in this case, the outermost switch
 ports must decapsulate the packet.
 
 Note that in certain cases, it might be the case that the tagging format used
-by a leaf switch (not connected directly to the CPU) to not be the same as what
+by a leaf switch (not connected directly to the CPU) is not the same as what
 the network stack sees. This can be seen with Marvell switch trees, where the
 CPU port can be configured to use either the DSA or the Ethertype DSA (EDSA)
 format, but the DSA links are configured to use the shorter (without Ethertype)
@@ -270,21 +270,21 @@ These interfaces are specialized in order to:
   to/from specific switch ports
 - query the switch for ethtool operations: statistics, link state,
   Wake-on-LAN, register dumps...
-- external/internal PHY management: link, auto-negotiation etc.
+- manage external/internal PHY: link, auto-negotiation, etc.
 
 These slave network devices have custom net_device_ops and ethtool_ops function
 pointers which allow DSA to introduce a level of layering between the networking
-stack/ethtool, and the switch driver implementation.
+stack/ethtool and the switch driver implementation.
 
 Upon frame transmission from these slave network devices, DSA will look up which
-switch tagging protocol is currently registered with these network devices, and
+switch tagging protocol is currently registered with these network devices and
 invoke a specific transmit routine which takes care of adding the relevant
 switch tag in the Ethernet frames.
 
 These frames are then queued for transmission using the master network device
-``ndo_start_xmit()`` function, since they contain the appropriate switch tag, the
+``ndo_start_xmit()`` function. Since they contain the appropriate switch tag, the
 Ethernet switch will be able to process these incoming frames from the
-management interface and delivers these frames to the physical switch port.
+management interface and deliver them to the physical switch port.
 
 Graphical representation
 ------------------------
@@ -330,9 +330,9 @@ MDIO reads/writes towards specific PHY addresses. In most MDIO-connected
 switches, these functions would utilize direct or indirect PHY addressing mode
 to return standard MII registers from the switch builtin PHYs, allowing the PHY
 library and/or to return link status, link partner pages, auto-negotiation
-results etc..
+results, etc.
 
-For Ethernet switches which have both external and internal MDIO busses, the
+For Ethernet switches which have both external and internal MDIO buses, the
 slave MII bus can be utilized to mux/demux MDIO reads and writes towards either
 internal or external MDIO devices this switch might be connected to: internal
 PHYs, external PHYs, or even external switches.
@@ -349,7 +349,7 @@ DSA data structures are defined in ``include/net/dsa.h`` as well as
   table indication (when cascading switches)
 
 - ``dsa_platform_data``: platform device configuration data which can reference
-  a collection of dsa_chip_data structure if multiples switches are cascaded,
+  a collection of dsa_chip_data structures if multiple switches are cascaded,
   the master network device this switch tree is attached to needs to be
   referenced
 
@@ -426,7 +426,7 @@ logic basically looks like this:
   "phy-handle" property, if found, this PHY device is created and registered
   using ``of_phy_connect()``
 
-- if Device Tree is used, and the PHY device is "fixed", that is, conforms to
+- if Device Tree is used and the PHY device is "fixed", that is, conforms to
   the definition of a non-MDIO managed PHY as defined in
   ``Documentation/devicetree/bindings/net/fixed-link.txt``, the PHY is registered
   and connected transparently using the special fixed MDIO bus driver
@@ -481,7 +481,7 @@ Device Tree
 DSA features a standardized binding which is documented in
 ``Documentation/devicetree/bindings/net/dsa/dsa.txt``. PHY/MDIO library helper
 functions such as ``of_get_phy_mode()``, ``of_phy_connect()`` are also used to query
-per-port PHY specific details: interface connection, MDIO bus location etc..
+per-port PHY specific details: interface connection, MDIO bus location, etc.
 
 Driver development
 ==================
@@ -509,7 +509,7 @@ Switch configuration
 
 - ``setup``: setup function for the switch, this function is responsible for setting
   up the ``dsa_switch_ops`` private structure with all it needs: register maps,
-  interrupts, mutexes, locks etc.. This function is also expected to properly
+  interrupts, mutexes, locks, etc. This function is also expected to properly
   configure the switch to separate all network interfaces from each other, that
   is, they should be isolated by the switch hardware itself, typically by creating
   a Port-based VLAN ID for each port and allowing only the CPU port and the
@@ -526,13 +526,13 @@ PHY devices and link management
 - ``get_phy_flags``: Some switches are interfaced to various kinds of Ethernet PHYs,
   if the PHY library PHY driver needs to know about information it cannot obtain
   on its own (e.g.: coming from switch memory mapped registers), this function
-  should return a 32-bits bitmask of "flags", that is private between the switch
+  should return a 32-bit bitmask of "flags" that is private between the switch
   driver and the Ethernet PHY driver in ``drivers/net/phy/\*``.
 
 - ``phy_read``: Function invoked by the DSA slave MDIO bus when attempting to read
   the switch port MDIO registers. If unavailable, return 0xffff for each read.
   For builtin switch Ethernet PHYs, this function should allow reading the link
-  status, auto-negotiation results, link partner pages etc..
+  status, auto-negotiation results, link partner pages, etc.
 
 - ``phy_write``: Function invoked by the DSA slave MDIO bus when attempting to write
   to the switch port MDIO registers. If unavailable return a negative error
@@ -554,7 +554,7 @@ Ethtool operations
 ------------------
 
 - ``get_strings``: ethtool function used to query the driver's strings, will
-  typically return statistics strings, private flags strings etc.
+  typically return statistics strings, private flags strings, etc.
 
 - ``get_ethtool_stats``: ethtool function used to query per-port statistics and
   return their values. DSA overlays slave network devices general statistics:
@@ -564,7 +564,7 @@ Ethtool operations
 - ``get_sset_count``: ethtool function used to query the number of statistics items
 
 - ``get_wol``: ethtool function used to obtain Wake-on-LAN settings per-port, this
-  function may, for certain implementations also query the master network device
+  function may for certain implementations also query the master network device
   Wake-on-LAN settings if this interface needs to participate in Wake-on-LAN
 
 - ``set_wol``: ethtool function used to configure Wake-on-LAN settings per-port,
@@ -607,14 +607,14 @@ Power management
   in a fully active state
 
 - ``port_enable``: function invoked by the DSA slave network device ndo_open
-  function when a port is administratively brought up, this function should be
-  fully enabling a given switch port. DSA takes care of marking the port with
+  function when a port is administratively brought up, this function should
+  fully enable a given switch port. DSA takes care of marking the port with
   ``BR_STATE_BLOCKING`` if the port is a bridge member, or ``BR_STATE_FORWARDING`` if it
   was not, and propagating these changes down to the hardware
 
 - ``port_disable``: function invoked by the DSA slave network device ndo_close
-  function when a port is administratively brought down, this function should be
-  fully disabling a given switch port. DSA takes care of marking the port with
+  function when a port is administratively brought down, this function should
+  fully disable a given switch port. DSA takes care of marking the port with
   ``BR_STATE_DISABLED`` and propagating changes to the hardware if this port is
   disabled while being a bridge member
 
@@ -622,12 +622,12 @@ Bridge layer
 ------------
 
 - ``port_bridge_join``: bridge layer function invoked when a given switch port is
-  added to a bridge, this function should be doing the necessary at the switch
-  level to permit the joining port from being added to the relevant logical
+  added to a bridge, this function should do what's necessary at the switch
+  level to permit the joining port to be added to the relevant logical
   domain for it to ingress/egress traffic with other members of the bridge.
 
 - ``port_bridge_leave``: bridge layer function invoked when a given switch port is
-  removed from a bridge, this function should be doing the necessary at the
+  removed from a bridge, this function should do what's necessary at the
   switch level to deny the leaving port from ingress/egress traffic from the
   remaining bridge members. When the port leaves the bridge, it should be aged
   out at the switch hardware for the switch to (re) learn MAC addresses behind
@@ -663,7 +663,7 @@ Bridge layer
   point for drivers that need to configure the hardware for enabling this
   feature.
 
-- ``port_bridge_tx_fwd_unoffload``: bridge layer function invoken when a driver
+- ``port_bridge_tx_fwd_unoffload``: bridge layer function invoked when a driver
   leaves a bridge port which had the TX forwarding offload feature enabled.
 
 Bridge VLAN filtering
@@ -40,7 +40,8 @@ static void msr_save_context(struct saved_context *ctxt)
         struct saved_msr *end = msr + ctxt->saved_msrs.num;
 
         while (msr < end) {
-                msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q);
+                if (msr->valid)
+                        rdmsrl(msr->info.msr_no, msr->info.reg.q);
                 msr++;
         }
 }
@@ -424,8 +425,10 @@ static int msr_build_context(const u32 *msr_id, const int num)
         }
 
         for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
+                u64 dummy;
+
                 msr_array[i].info.msr_no        = msr_id[j];
-                msr_array[i].valid              = false;
+                msr_array[i].valid              = !rdmsrl_safe(msr_id[j], &dummy);
                 msr_array[i].info.reg.q         = 0;
         }
         saved_msrs->num   = total_num;
@@ -500,10 +503,24 @@ static int pm_cpu_check(const struct x86_cpu_id *c)
         return ret;
 }
 
+static void pm_save_spec_msr(void)
+{
+        u32 spec_msr_id[] = {
+                MSR_IA32_SPEC_CTRL,
+                MSR_IA32_TSX_CTRL,
+                MSR_TSX_FORCE_ABORT,
+                MSR_IA32_MCU_OPT_CTRL,
+                MSR_AMD64_LS_CFG,
+        };
+
+        msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));
+}
+
 static int pm_check_save_msr(void)
 {
         dmi_check_system(msr_save_dmi_table);
         pm_cpu_check(msr_save_cpu_table);
+        pm_save_spec_msr();
 
         return 0;
 }
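[Editor's note] A userspace sketch of the probe-once pattern the cpu.c hunks above establish: validity is decided with a failable read when the MSR table is built, and the suspend path then only re-reads MSRs that probed as present. The MSR numbers and the rdmsrl_safe_stub() helper below are stand-ins, not kernel API.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct saved_msr {
        uint32_t msr_no;
        uint64_t value;
        bool valid;
};

/* Stub standing in for rdmsrl_safe(): fails for "absent" registers. */
static int rdmsrl_safe_stub(uint32_t msr, uint64_t *val)
{
        if (msr == 0x10a)       /* pretend this one is unimplemented */
                return -1;
        *val = 0xabcd;          /* fake register contents */
        return 0;
}

int main(void)
{
        struct saved_msr tbl[] = { { 0x48 }, { 0x10a }, { 0x122 } };
        uint64_t dummy;

        /* Build phase: probe validity once. */
        for (size_t i = 0; i < 3; i++)
                tbl[i].valid = !rdmsrl_safe_stub(tbl[i].msr_no, &dummy);

        /* Save phase: only read MSRs that probed as present. */
        for (size_t i = 0; i < 3; i++)
                if (tbl[i].valid)
                        rdmsrl_safe_stub(tbl[i].msr_no, &tbl[i].value);

        for (size_t i = 0; i < 3; i++)
                printf("msr 0x%x valid=%d\n", (unsigned)tbl[i].msr_no,
                       (int)tbl[i].valid);
        return 0;
}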
@@ -115,14 +115,16 @@ config SATA_AHCI
 
           If unsure, say N.
 
-config SATA_LPM_POLICY
+config SATA_MOBILE_LPM_POLICY
         int "Default SATA Link Power Management policy for low power chipsets"
         range 0 4
         default 0
         depends on SATA_AHCI
         help
           Select the Default SATA Link Power Management (LPM) policy to use
-          for chipsets / "South Bridges" designated as supporting low power.
+          for chipsets / "South Bridges" supporting low-power modes. Such
+          chipsets are typically found on most laptops but desktops and
+          servers now also widely use chipsets supporting low power modes.
 
           The value set has the following meanings:
           0 => Keep firmware settings
@@ -1595,7 +1595,7 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
 static void ahci_update_initial_lpm_policy(struct ata_port *ap,
                                            struct ahci_host_priv *hpriv)
 {
-        int policy = CONFIG_SATA_LPM_POLICY;
+        int policy = CONFIG_SATA_MOBILE_LPM_POLICY;
 
 
         /* Ignore processing for chipsets that don't use policy */
@@ -236,7 +236,7 @@ enum {
         AHCI_HFLAG_NO_WRITE_TO_RO       = (1 << 24), /* don't write to read
                                                         only registers */
         AHCI_HFLAG_USE_LPM_POLICY       = (1 << 25), /* chipset that should use
-                                                        SATA_LPM_POLICY
+                                                        SATA_MOBILE_LPM_POLICY
                                                         as default lpm_policy */
         AHCI_HFLAG_SUSPEND_PHYS         = (1 << 26), /* handle PHYs during
                                                         suspend/resume */
@@ -4014,6 +4014,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
                                                 ATA_HORKAGE_ZERO_AFTER_TRIM, },
         { "Crucial_CT*MX100*",          "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
                                                 ATA_HORKAGE_ZERO_AFTER_TRIM, },
+        { "Samsung SSD 840 EVO*",       NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
+                                                ATA_HORKAGE_NO_DMA_LOG |
+                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
         { "Samsung SSD 840*",           NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
                                                 ATA_HORKAGE_ZERO_AFTER_TRIM, },
         { "Samsung SSD 850*",           NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
@@ -1634,7 +1634,7 @@ EXPORT_SYMBOL_GPL(ata_sff_interrupt);
 
 void ata_sff_lost_interrupt(struct ata_port *ap)
 {
-        u8 status;
+        u8 status = 0;
         struct ata_queued_cmd *qc;
 
         /* Only one outstanding command per SFF channel */
@@ -137,7 +137,11 @@ struct sata_dwc_device {
 #endif
 };
 
-#define SATA_DWC_QCMD_MAX       32
+/*
+ * Allow one extra special slot for commands and DMA management
+ * to account for libata internal commands.
+ */
+#define SATA_DWC_QCMD_MAX       (ATA_MAX_QUEUE + 1)
 
 struct sata_dwc_device_port {
         struct sata_dwc_device  *hsdev;
@@ -437,11 +437,8 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
          * This shouldn't be set by functions like add_device_randomness(),
          * where we can't trust the buffer passed to it is guaranteed to be
          * unpredictable (so it might not have any entropy at all).
-         *
-         * Returns the number of bytes processed from input, which is bounded
-         * by CRNG_INIT_CNT_THRESH if account is true.
          */
-static size_t crng_pre_init_inject(const void *input, size_t len, bool account)
+static void crng_pre_init_inject(const void *input, size_t len, bool account)
 {
         static int crng_init_cnt = 0;
         struct blake2s_state hash;
@@ -452,18 +449,15 @@ static void crng_pre_init_inject(const void *input, size_t len, bool account)
         spin_lock_irqsave(&base_crng.lock, flags);
         if (crng_init != 0) {
                 spin_unlock_irqrestore(&base_crng.lock, flags);
-                return 0;
+                return;
         }
 
-        if (account)
-                len = min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt);
-
         blake2s_update(&hash, base_crng.key, sizeof(base_crng.key));
         blake2s_update(&hash, input, len);
         blake2s_final(&hash, base_crng.key);
 
         if (account) {
-                crng_init_cnt += len;
+                crng_init_cnt += min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt);
                 if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
                         ++base_crng.generation;
                         crng_init = 1;
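[Editor's note] The accounting change above is subtle: the full input is now always hashed into the base CRNG key, while only a clamped amount is credited, so crng_init_cnt can reach but never overshoot the threshold. A minimal standalone sketch of the clamp (the threshold and counter values are illustrative):

#include <stdio.h>
#include <stddef.h>

#define CRNG_INIT_CNT_THRESH 64

static size_t min_size(size_t a, size_t b) { return a < b ? a : b; }

int main(void)
{
        size_t crng_init_cnt = 50;
        size_t len = 32;        /* bytes of input just hashed */

        crng_init_cnt += min_size(len, CRNG_INIT_CNT_THRESH - crng_init_cnt);
        printf("credited up to %zu of %d\n", crng_init_cnt,
               CRNG_INIT_CNT_THRESH);  /* prints 64, never 82 */
        return 0;
}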
@@ -474,8 +468,6 @@ static void crng_pre_init_inject(const void *input, size_t len, bool account)
 
         if (crng_init == 1)
                 pr_notice("fast init done\n");
-
-        return len;
 }
 
 static void _get_random_bytes(void *buf, size_t nbytes)
@@ -531,7 +523,6 @@ EXPORT_SYMBOL(get_random_bytes);
 
 static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
 {
-        bool large_request = nbytes > 256;
         ssize_t ret = 0;
         size_t len;
         u32 chacha_state[CHACHA_STATE_WORDS];
@@ -540,22 +531,23 @@ static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
         if (!nbytes)
                 return 0;
 
-        len = min_t(size_t, 32, nbytes);
-        crng_make_state(chacha_state, output, len);
-
-        if (copy_to_user(buf, output, len))
-                return -EFAULT;
-        nbytes -= len;
-        buf += len;
-        ret += len;
-
-        while (nbytes) {
-                if (large_request && need_resched()) {
-                        if (signal_pending(current))
-                                break;
-                        schedule();
-                }
+        /*
+         * Immediately overwrite the ChaCha key at index 4 with random
+         * bytes, in case userspace causes copy_to_user() below to sleep
+         * forever, so that we still retain forward secrecy in that case.
+         */
+        crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
+        /*
+         * However, if we're doing a read of len <= 32, we don't need to
+         * use chacha_state after, so we can simply return those bytes to
+         * the user directly.
+         */
+        if (nbytes <= CHACHA_KEY_SIZE) {
+                ret = copy_to_user(buf, &chacha_state[4], nbytes) ? -EFAULT : nbytes;
+                goto out_zero_chacha;
+        }
 
+        do {
                 chacha20_block(chacha_state, output);
                 if (unlikely(chacha_state[12] == 0))
                         ++chacha_state[13];
@@ -569,10 +561,18 @@ static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
                 nbytes -= len;
                 buf += len;
                 ret += len;
-        }
 
-        memzero_explicit(chacha_state, sizeof(chacha_state));
+                BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0);
+                if (!(ret % PAGE_SIZE) && nbytes) {
+                        if (signal_pending(current))
+                                break;
+                        cond_resched();
+                }
+        } while (nbytes);
+
         memzero_explicit(output, sizeof(output));
+out_zero_chacha:
+        memzero_explicit(chacha_state, sizeof(chacha_state));
         return ret;
 }
 
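[Editor's note] A userspace analog of the reworked read loop above: output is produced a block at a time and interruption is polled once per page of output instead of relying on need_resched(). The block/page sizes and the signal_pending_stub() helper stand in for the kernel's CHACHA_BLOCK_SIZE, PAGE_SIZE and signal_pending(); the "keystream" is faked.

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 64           /* stands in for CHACHA_BLOCK_SIZE */
#define PAGE_SZ    4096         /* stands in for PAGE_SIZE */

static int signal_pending_stub(void) { return 0; }

static long fill_random(unsigned char *buf, size_t nbytes)
{
        unsigned char block[BLOCK_SIZE];
        long ret = 0;

        do {
                memset(block, 0xaa, sizeof(block));     /* fake keystream */
                size_t len = nbytes < sizeof(block) ? nbytes : sizeof(block);

                memcpy(buf, block, len);
                nbytes -= len;
                buf += len;
                ret += len;

                /* Check once per page of output, as the patch does. */
                if (!(ret % PAGE_SZ) && nbytes && signal_pending_stub())
                        break;
        } while (nbytes);

        return ret;
}

int main(void)
{
        unsigned char out[10000];

        printf("copied %ld bytes\n", fill_random(out, sizeof(out)));
        return 0;
}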
@@ -1141,12 +1141,9 @@ void add_hwgenerator_randomness(const void *buffer, size_t count,
                                size_t entropy)
 {
         if (unlikely(crng_init == 0 && entropy < POOL_MIN_BITS)) {
-                size_t ret = crng_pre_init_inject(buffer, count, true);
-                mix_pool_bytes(buffer, ret);
-                count -= ret;
-                buffer += ret;
-                if (!count || crng_init == 0)
-                        return;
+                crng_pre_init_inject(buffer, count, true);
+                mix_pool_bytes(buffer, count);
+                return;
         }
 
         /*
@@ -1545,6 +1542,13 @@ static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
 {
         static int maxwarn = 10;
 
+        /*
+         * Opportunistically attempt to initialize the RNG on platforms that
+         * have fast cycle counters, but don't (for now) require it to succeed.
+         */
+        if (!crng_ready())
+                try_to_generate_entropy();
+
         if (!crng_ready() && maxwarn > 0) {
                 maxwarn--;
                 if (__ratelimit(&urandom_warning))
@@ -380,7 +380,7 @@ void vmbus_channel_map_relid(struct vmbus_channel *channel)
          * execute:
          *
          * (a) In the "normal (i.e., not resuming from hibernation)" path,
-         *     the full barrier in smp_store_mb() guarantees that the store
+         *     the full barrier in virt_store_mb() guarantees that the store
          *     is propagated to all CPUs before the add_channel_work work
          *     is queued.  In turn, add_channel_work is queued before the
          *     channel's ring buffer is allocated/initialized and the
@@ -392,14 +392,14 @@ void vmbus_channel_map_relid(struct vmbus_channel *channel)
          *     recv_int_page before retrieving the channel pointer from the
          *     array of channels.
          *
-         * (b) In the "resuming from hibernation" path, the smp_store_mb()
+         * (b) In the "resuming from hibernation" path, the virt_store_mb()
          *     guarantees that the store is propagated to all CPUs before
          *     the VMBus connection is marked as ready for the resume event
          *     (cf. check_ready_for_resume_event()).  The interrupt handler
          *     of the VMBus driver and vmbus_chan_sched() can not run before
          *     vmbus_bus_resume() has completed execution (cf. resume_noirq).
          */
-        smp_store_mb(
+        virt_store_mb(
                 vmbus_connection.channels[channel->offermsg.child_relid],
                 channel);
 }
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include <linux/kthread.h>
 #include <linux/completion.h>
+#include <linux/count_zeros.h>
 #include <linux/memory_hotplug.h>
 #include <linux/memory.h>
 #include <linux/notifier.h>
@@ -1130,6 +1131,7 @@ static void post_status(struct hv_dynmem_device *dm)
         struct dm_status status;
         unsigned long now = jiffies;
         unsigned long last_post = last_post_time;
+        unsigned long num_pages_avail, num_pages_committed;
 
         if (pressure_report_delay > 0) {
                 --pressure_report_delay;
@@ -1154,16 +1156,21 @@ static void post_status(struct hv_dynmem_device *dm)
          * num_pages_onlined) as committed to the host, otherwise it can try
          * asking us to balloon them out.
          */
-        status.num_avail = si_mem_available();
-        status.num_committed = vm_memory_committed() +
+        num_pages_avail = si_mem_available();
+        num_pages_committed = vm_memory_committed() +
                 dm->num_pages_ballooned +
                 (dm->num_pages_added > dm->num_pages_onlined ?
                  dm->num_pages_added - dm->num_pages_onlined : 0) +
                 compute_balloon_floor();
 
-        trace_balloon_status(status.num_avail, status.num_committed,
+        trace_balloon_status(num_pages_avail, num_pages_committed,
                              vm_memory_committed(), dm->num_pages_ballooned,
                              dm->num_pages_added, dm->num_pages_onlined);
 
+        /* Convert numbers of pages into numbers of HV_HYP_PAGEs. */
+        status.num_avail = num_pages_avail * NR_HV_HYP_PAGES_IN_PAGE;
+        status.num_committed = num_pages_committed * NR_HV_HYP_PAGES_IN_PAGE;
+
         /*
          * If our transaction ID is no longer current, just don't
          * send the status. This can happen if we were interrupted
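[Editor's note] The conversion added above matters on guests whose page size is not 4 KiB: the host always thinks in HV_HYP_PAGE_SIZE units. A sketch of the arithmetic, assuming a 64 KiB guest page purely for illustration:

#include <stdio.h>

#define HV_HYP_PAGE_SIZE  4096UL
#define GUEST_PAGE_SIZE   65536UL       /* e.g. ARM64 with 64 KiB pages */
#define NR_HV_HYP_PAGES_IN_PAGE (GUEST_PAGE_SIZE / HV_HYP_PAGE_SIZE)

int main(void)
{
        unsigned long num_pages_committed = 1000;       /* guest pages */

        printf("report %lu hypervisor pages\n",
               num_pages_committed * NR_HV_HYP_PAGES_IN_PAGE);  /* 16000 */
        return 0;
}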
@@ -1653,6 +1660,38 @@ static void disable_page_reporting(void)
         }
 }
 
+static int ballooning_enabled(void)
+{
+        /*
+         * Disable ballooning if the page size is not 4k (HV_HYP_PAGE_SIZE),
+         * since currently it's unclear to us whether an unballoon request can
+         * make sure all page ranges are guest page size aligned.
+         */
+        if (PAGE_SIZE != HV_HYP_PAGE_SIZE) {
+                pr_info("Ballooning disabled because page size is not 4096 bytes\n");
+                return 0;
+        }
+
+        return 1;
+}
+
+static int hot_add_enabled(void)
+{
+        /*
+         * Disable hot add on ARM64, because we currently rely on
+         * memory_add_physaddr_to_nid() to get a node id of a hot add range,
+         * however ARM64's memory_add_physaddr_to_nid() always return 0 and
+         * DM_MEM_HOT_ADD_REQUEST doesn't have the NUMA node information for
+         * add_memory().
+         */
+        if (IS_ENABLED(CONFIG_ARM64)) {
+                pr_info("Memory hot add disabled on ARM64\n");
+                return 0;
+        }
+
+        return 1;
+}
+
 static int balloon_connect_vsp(struct hv_device *dev)
 {
         struct dm_version_request version_req;
@@ -1724,8 +1763,8 @@ static int balloon_connect_vsp(struct hv_device *dev)
          * currently still requires the bits to be set, so we have to add code
          * to fail the host's hot-add and balloon up/down requests, if any.
          */
-        cap_msg.caps.cap_bits.balloon = 1;
-        cap_msg.caps.cap_bits.hot_add = 1;
+        cap_msg.caps.cap_bits.balloon = ballooning_enabled();
+        cap_msg.caps.cap_bits.hot_add = hot_add_enabled();
 
         /*
          * Specify our alignment requirements as it relates
@@ -20,6 +20,7 @@
 #include <linux/panic_notifier.h>
 #include <linux/ptrace.h>
 #include <linux/slab.h>
+#include <linux/dma-map-ops.h>
 #include <asm/hyperv-tlfs.h>
 #include <asm/mshyperv.h>
 
@@ -218,6 +219,16 @@ bool hv_query_ext_cap(u64 cap_query)
 }
 EXPORT_SYMBOL_GPL(hv_query_ext_cap);
 
+void hv_setup_dma_ops(struct device *dev, bool coherent)
+{
+        /*
+         * Hyper-V does not offer a vIOMMU in the guest
+         * VM, so pass 0/NULL for the IOMMU settings
+         */
+        arch_setup_dma_ops(dev, 0, 0, NULL, coherent);
+}
+EXPORT_SYMBOL_GPL(hv_setup_dma_ops);
+
 bool hv_is_hibernation_supported(void)
 {
         return !hv_root_partition && acpi_sleep_state_supported(ACPI_STATE_S4);
@@ -439,7 +439,16 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
 static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
 {
         u32 priv_read_loc = rbi->priv_read_index;
-        u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);
+        u32 write_loc;
+
+        /*
+         * The Hyper-V host writes the packet data, then uses
+         * store_release() to update the write_index. Use load_acquire()
+         * here to prevent loads of the packet data from being re-ordered
+         * before the read of the write_index and potentially getting
+         * stale data.
+         */
+        write_loc = virt_load_acquire(&rbi->ring_buffer->write_index);
 
         if (write_loc >= priv_read_loc)
                 return write_loc - priv_read_loc;
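[Editor's note] The comment added above describes a classic release/acquire pairing. A standalone C11 sketch of the same discipline; the ring layout here is simplified and is not the VMBus ring format:

#include <stdatomic.h>
#include <stdio.h>
#include <stdint.h>

static uint8_t ring[256];
static _Atomic uint32_t write_index;

static void producer_put(uint8_t byte)
{
        uint32_t w = atomic_load_explicit(&write_index, memory_order_relaxed);

        ring[w % sizeof(ring)] = byte;
        /* Publish the data: the store above may not sink below this. */
        atomic_store_explicit(&write_index, w + 1, memory_order_release);
}

static uint32_t consumer_avail(uint32_t read_index)
{
        /* Pairs with the release store; data loads can't float above it. */
        uint32_t w = atomic_load_explicit(&write_index, memory_order_acquire);

        return w - read_index;
}

int main(void)
{
        producer_put(0x42);
        printf("%u byte(s) available\n", consumer_avail(0));
        return 0;
}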
@@ -77,8 +77,8 @@ static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
 
         /*
          * Hyper-V should be notified only once about a panic. If we will be
-         * doing hyperv_report_panic_msg() later with kmsg data, don't do
-         * the notification here.
+         * doing hv_kmsg_dump() with kmsg data later, don't do the notification
+         * here.
          */
         if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
             && hyperv_report_reg()) {
@@ -100,8 +100,8 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
 
         /*
          * Hyper-V should be notified only once about a panic. If we will be
-         * doing hyperv_report_panic_msg() later with kmsg data, don't do
-         * the notification here.
+         * doing hv_kmsg_dump() with kmsg data later, don't do the notification
+         * here.
          */
         if (hyperv_report_reg())
                 hyperv_report_panic(regs, val, true);
@@ -920,6 +920,21 @@ static int vmbus_probe(struct device *child_device)
         return ret;
 }
 
+/*
+ * vmbus_dma_configure -- Configure DMA coherence for VMbus device
+ */
+static int vmbus_dma_configure(struct device *child_device)
+{
+        /*
+         * On ARM64, propagate the DMA coherence setting from the top level
+         * VMbus ACPI device to the child VMbus device being added here.
+         * On x86/x64 coherence is assumed and these calls have no effect.
+         */
+        hv_setup_dma_ops(child_device,
+                device_get_dma_attr(&hv_acpi_dev->dev) == DEV_DMA_COHERENT);
+        return 0;
+}
+
 /*
  * vmbus_remove - Remove a vmbus device
  */
@@ -1040,6 +1055,7 @@ static struct bus_type hv_bus = {
         .remove =               vmbus_remove,
         .probe =                vmbus_probe,
         .uevent =               vmbus_uevent,
+        .dma_configure =        vmbus_dma_configure,
         .dev_groups =           vmbus_dev_groups,
         .drv_groups =           vmbus_drv_groups,
         .bus_groups =           vmbus_bus_groups,
@@ -1546,14 +1562,20 @@ static int vmbus_bus_init(void)
         if (ret)
                 goto err_connect;
 
+        if (hv_is_isolation_supported())
+                sysctl_record_panic_msg = 0;
+
         /*
          * Only register if the crash MSRs are available
          */
         if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
                 u64 hyperv_crash_ctl;
                 /*
-                 * Sysctl registration is not fatal, since by default
-                 * reporting is enabled.
+                 * Panic message recording (sysctl_record_panic_msg)
+                 * is enabled by default in non-isolated guests and
+                 * disabled by default in isolated guests; the panic
+                 * message recording won't be available in isolated
+                 * guests should the following registration fail.
                  */
                 hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
                 if (!hv_ctl_table_hdr)
@@ -2097,6 +2119,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
         child_device_obj->device.parent = &hv_acpi_dev->dev;
         child_device_obj->device.release = vmbus_device_release;
 
+        child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
+        child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
+        dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
+
         /*
          * Register with the LDM. This will kick off the driver/device
          * binding...which will eventually call vmbus_match() and vmbus_probe()
@@ -2122,9 +2148,6 @@ int vmbus_device_register(struct hv_device *child_device_obj)
         }
         hv_debug_add_dev_dir(child_device_obj);
 
-        child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
-        child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
-        dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
         return 0;
 
 err_kset_unregister:
@@ -2428,6 +2451,21 @@ static int vmbus_acpi_add(struct acpi_device *device)
 
         hv_acpi_dev = device;
 
+        /*
+         * Older versions of Hyper-V for ARM64 fail to include the _CCA
+         * method on the top level VMbus device in the DSDT. But devices
+         * are hardware coherent in all current Hyper-V use cases, so fix
+         * up the ACPI device to behave as if _CCA is present and indicates
+         * hardware coherence.
+         */
+        ACPI_COMPANION_SET(&device->dev, device);
+        if (IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED) &&
+            device_get_dma_attr(&device->dev) == DEV_DMA_NOT_SUPPORTED) {
+                pr_info("No ACPI _CCA found; assuming coherent device I/O\n");
+                device->flags.cca_seen = true;
+                device->flags.coherent_dma = true;
+        }
+
         result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
                                         vmbus_walk_resources, NULL);
 
@@ -2780,10 +2818,15 @@ static void __exit vmbus_exit(void)
         if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
                 kmsg_dump_unregister(&hv_kmsg_dumper);
                 unregister_die_notifier(&hyperv_die_block);
-                atomic_notifier_chain_unregister(&panic_notifier_list,
-                                                 &hyperv_panic_block);
         }
 
+        /*
+         * The panic notifier is always registered, hence we should
+         * also unconditionally unregister it here as well.
+         */
+        atomic_notifier_chain_unregister(&panic_notifier_list,
+                                         &hyperv_panic_block);
+
         free_page((unsigned long)hv_panic_page);
         unregister_sysctl_table(hv_ctl_table_hdr);
         hv_ctl_table_hdr = NULL;
@@ -3253,6 +3253,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
                 }
                 qidx = bp->tc_to_qidx[j];
                 ring->queue_id = bp->q_info[qidx].queue_id;
+                spin_lock_init(&txr->xdp_tx_lock);
                 if (i < bp->tx_nr_rings_xdp)
                         continue;
                 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
@@ -10338,6 +10339,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
         if (irq_re_init)
                 udp_tunnel_nic_reset_ntf(bp->dev);
 
+        if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
+                if (!static_key_enabled(&bnxt_xdp_locking_key))
+                        static_branch_enable(&bnxt_xdp_locking_key);
+        } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
+                static_branch_disable(&bnxt_xdp_locking_key);
+        }
         set_bit(BNXT_STATE_OPEN, &bp->state);
         bnxt_enable_int(bp);
         /* Enable TX queues */
@@ -593,7 +593,8 @@ struct nqe_cn {
 #define BNXT_MAX_MTU            9500
 #define BNXT_MAX_PAGE_MODE_MTU  \
         ((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN -       \
-         XDP_PACKET_HEADROOM)
+         XDP_PACKET_HEADROOM - \
+         SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)))
 
 #define BNXT_MIN_PKT_SIZE       52
 
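[Editor's note] The macro fix above reserves room for the skb_shared_info tail placed at the end of the page in XDP page mode. A standalone sketch of the arithmetic; the 320-byte aligned skb_shared_info size is illustrative only, as the real value depends on kernel configuration:

#include <stdio.h>

#define PAGE_SZ                 4096u
#define VLAN_ETH_HLEN           18u
#define NET_IP_ALIGN            0u      /* 0 on x86, 2 on many others */
#define XDP_PACKET_HEADROOM     256u
#define SHINFO_ALIGNED          320u    /* illustrative skb_shared_info */

int main(void)
{
        unsigned int old_mtu = PAGE_SZ - VLAN_ETH_HLEN - NET_IP_ALIGN -
                               XDP_PACKET_HEADROOM;
        unsigned int new_mtu = old_mtu - SHINFO_ALIGNED;

        /* The old bound left no room for the skb_shared_info tail. */
        printf("old %u, new %u\n", old_mtu, new_mtu);
        return 0;
}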
@@ -800,6 +801,8 @@ struct bnxt_tx_ring_info {
         u32                     dev_state;
 
         struct bnxt_ring_struct tx_ring_struct;
+        /* Synchronize simultaneous xdp_xmit on same ring */
+        spinlock_t              xdp_tx_lock;
 };
 
 #define BNXT_LEGACY_COAL_CMPL_PARAMS    \
@@ -20,6 +20,8 @@
 #include "bnxt.h"
 #include "bnxt_xdp.h"
 
+DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
+
 struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
                                    struct bnxt_tx_ring_info *txr,
                                    dma_addr_t mapping, u32 len)
@@ -227,11 +229,16 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
         ring = smp_processor_id() % bp->tx_nr_rings_xdp;
         txr = &bp->tx_ring[ring];
 
+        if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING)
+                return -EINVAL;
+
+        if (static_branch_unlikely(&bnxt_xdp_locking_key))
+                spin_lock(&txr->xdp_tx_lock);
+
         for (i = 0; i < num_frames; i++) {
                 struct xdp_frame *xdp = frames[i];
 
-                if (!txr || !bnxt_tx_avail(bp, txr) ||
-                    !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP))
+                if (!bnxt_tx_avail(bp, txr))
                         break;
 
                 mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
@@ -250,6 +257,9 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
                 bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
         }
 
+        if (static_branch_unlikely(&bnxt_xdp_locking_key))
+                spin_unlock(&txr->xdp_tx_lock);
+
         return nxmit;
 }
 
@ -10,6 +10,8 @@
|
||||||
#ifndef BNXT_XDP_H
|
#ifndef BNXT_XDP_H
|
||||||
#define BNXT_XDP_H
|
#define BNXT_XDP_H
|
||||||
|
|
||||||
|
DECLARE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
|
||||||
|
|
||||||
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
|
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
|
||||||
struct bnxt_tx_ring_info *txr,
|
struct bnxt_tx_ring_info *txr,
|
||||||
dma_addr_t mapping, u32 len);
|
dma_addr_t mapping, u32 len);
|
||||||
|
|
|
@@ -167,7 +167,7 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
 	base = of_iomap(node, 0);
 	if (!base) {
 		err = -ENOMEM;
-		goto err_close;
+		goto err_put;
 	}
 
 	err = fsl_mc_allocate_irqs(mc_dev);
@@ -210,6 +210,8 @@ err_free_mc_irq:
 	fsl_mc_free_irqs(mc_dev);
 err_unmap:
 	iounmap(base);
+err_put:
+	of_node_put(node);
 err_close:
 	dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
 err_free_mcp:
@@ -586,8 +586,8 @@ static int fun_get_dev_limits(struct fun_dev *fdev)
 	/* Calculate the max QID based on SQ/CQ/doorbell counts.
 	 * SQ/CQ doorbells alternate.
 	 */
-	num_dbs = (pci_resource_len(pdev, 0) - NVME_REG_DBS) /
-		  (fdev->db_stride * 4);
+	num_dbs = (pci_resource_len(pdev, 0) - NVME_REG_DBS) >>
+		  (2 + NVME_CAP_STRIDE(fdev->cap_reg));
 	fdev->max_qid = min3(cq_count, sq_count, num_dbs / 2) - 1;
 	fdev->kern_end_qid = fdev->max_qid + 1;
 	return 0;
@@ -301,7 +301,6 @@ enum ice_vsi_state {
 	ICE_VSI_NETDEV_REGISTERED,
 	ICE_VSI_UMAC_FLTR_CHANGED,
 	ICE_VSI_MMAC_FLTR_CHANGED,
-	ICE_VSI_VLAN_FLTR_CHANGED,
 	ICE_VSI_PROMISC_CHANGED,
 	ICE_VSI_STATE_NBITS		/* must be last */
 };
@@ -672,7 +671,7 @@ static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
 
 static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
 {
-	return !!vsi->xdp_prog;
+	return !!READ_ONCE(vsi->xdp_prog);
 }
 
 static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
@@ -58,7 +58,16 @@ int
 ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
 			      u8 promisc_mask)
 {
-	return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
+	struct ice_pf *pf = hw->back;
+	int result;
+
+	result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
+	if (result)
+		dev_err(ice_pf_to_dev(pf),
+			"Error setting promisc mode on VSI %i (rc=%d)\n",
+			vsi->vsi_num, result);
+
+	return result;
 }
 
 /**
@@ -73,7 +82,16 @@ int
 ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
 				u8 promisc_mask)
 {
-	return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
+	struct ice_pf *pf = hw->back;
+	int result;
+
+	result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
+	if (result)
+		dev_err(ice_pf_to_dev(pf),
+			"Error clearing promisc mode on VSI %i (rc=%d)\n",
+			vsi->vsi_num, result);
+
+	return result;
 }
 
 /**
@@ -87,7 +105,16 @@ int
 ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
 			   u16 vid)
 {
-	return ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+	struct ice_pf *pf = hw->back;
+	int result;
+
+	result = ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+	if (result)
+		dev_err(ice_pf_to_dev(pf),
+			"Error clearing promisc mode on VSI %i for VID %u (rc=%d)\n",
+			ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
+
+	return result;
 }
 
 /**
@@ -101,7 +128,16 @@ int
 ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
 			 u16 vid)
 {
-	return ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+	struct ice_pf *pf = hw->back;
+	int result;
+
+	result = ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+	if (result)
+		dev_err(ice_pf_to_dev(pf),
+			"Error setting promisc mode on VSI %i for VID %u (rc=%d)\n",
+			ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
+
+	return result;
 }
 
 /**
@@ -1480,6 +1480,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
 		ring->tx_tstamps = &pf->ptp.port.tx;
 		ring->dev = dev;
 		ring->count = vsi->num_tx_desc;
+		ring->txq_teid = ICE_INVAL_TEID;
 		if (dvm_ena)
 			ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2;
 		else
@@ -2983,6 +2984,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
 		}
 	}
 
+	if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
+		ice_clear_dflt_vsi(pf->first_sw);
 	ice_fltr_remove_all(vsi);
 	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
 	err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
@@ -243,8 +243,7 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
 {
 	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
-	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) ||
-	       test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
+	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
 }
 
 /**
@@ -260,10 +259,15 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
 	if (vsi->type != ICE_VSI_PF)
 		return 0;
 
-	if (ice_vsi_has_non_zero_vlans(vsi))
-		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
-	else
-		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
+	if (ice_vsi_has_non_zero_vlans(vsi)) {
+		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
+		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
+						       promisc_m);
+	} else {
+		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
+						  promisc_m, 0);
+	}
 
 	return status;
 }
@@ -280,10 +284,15 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
 	if (vsi->type != ICE_VSI_PF)
 		return 0;
 
-	if (ice_vsi_has_non_zero_vlans(vsi))
-		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
-	else
-		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
+	if (ice_vsi_has_non_zero_vlans(vsi)) {
+		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
+		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
+							 promisc_m);
+	} else {
+		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+						    promisc_m, 0);
+	}
 
 	return status;
 }
@@ -302,7 +311,6 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
 	struct ice_pf *pf = vsi->back;
 	struct ice_hw *hw = &pf->hw;
 	u32 changed_flags = 0;
-	u8 promisc_m;
 	int err;
 
 	if (!vsi->netdev)
@@ -320,7 +328,6 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
 	if (ice_vsi_fltr_changed(vsi)) {
 		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
 		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
-		clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
 
 		/* grab the netdev's addr_list_lock */
 		netif_addr_lock_bh(netdev);
@@ -369,29 +376,15 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
 	/* check for changes in promiscuous modes */
 	if (changed_flags & IFF_ALLMULTI) {
 		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
-			if (ice_vsi_has_non_zero_vlans(vsi))
-				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
-			else
-				promisc_m = ICE_MCAST_PROMISC_BITS;
-
-			err = ice_set_promisc(vsi, promisc_m);
+			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
 			if (err) {
-				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
-					   vsi->vsi_num);
 				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
 				goto out_promisc;
 			}
 		} else {
 			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
-			if (ice_vsi_has_non_zero_vlans(vsi))
-				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
-			else
-				promisc_m = ICE_MCAST_PROMISC_BITS;
-
-			err = ice_clear_promisc(vsi, promisc_m);
+			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
 			if (err) {
-				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
-					   vsi->vsi_num);
 				vsi->current_netdev_flags |= IFF_ALLMULTI;
 				goto out_promisc;
 			}
@@ -2569,7 +2562,7 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
 		spin_lock_init(&xdp_ring->tx_lock);
 		for (j = 0; j < xdp_ring->count; j++) {
 			tx_desc = ICE_TX_DESC(xdp_ring, j);
-			tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE);
+			tx_desc->cmd_type_offset_bsz = 0;
 		}
 	}
 
@@ -2765,8 +2758,10 @@ free_qmap:
 
 	ice_for_each_xdp_txq(vsi, i)
 		if (vsi->xdp_rings[i]) {
-			if (vsi->xdp_rings[i]->desc)
+			if (vsi->xdp_rings[i]->desc) {
+				synchronize_rcu();
 				ice_free_tx_ring(vsi->xdp_rings[i]);
+			}
 			kfree_rcu(vsi->xdp_rings[i], rcu);
 			vsi->xdp_rings[i] = NULL;
 		}
@@ -3488,6 +3483,20 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
 	if (!vid)
 		return 0;
 
+	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
+		usleep_range(1000, 2000);
+
+	/* Add multicast promisc rule for the VLAN ID to be added if
+	 * all-multicast is currently enabled.
+	 */
+	if (vsi->current_netdev_flags & IFF_ALLMULTI) {
+		ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
+					       ICE_MCAST_VLAN_PROMISC_BITS,
+					       vid);
+		if (ret)
+			goto finish;
+	}
+
 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
 
 	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
@@ -3495,8 +3504,23 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
 	 */
 	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
 	ret = vlan_ops->add_vlan(vsi, &vlan);
-	if (!ret)
-		set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
+	if (ret)
+		goto finish;
+
+	/* If all-multicast is currently enabled and this VLAN ID is only one
+	 * besides VLAN-0 we have to update look-up type of multicast promisc
+	 * rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
+	 */
+	if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
+	    ice_vsi_num_non_zero_vlans(vsi) == 1) {
+		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+					   ICE_MCAST_PROMISC_BITS, 0);
+		ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
+					 ICE_MCAST_VLAN_PROMISC_BITS, 0);
+	}
+
+finish:
+	clear_bit(ICE_CFG_BUSY, vsi->state);
 
 	return ret;
 }
@@ -3522,6 +3546,9 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
 	if (!vid)
 		return 0;
 
+	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
+		usleep_range(1000, 2000);
+
 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
 
 	/* Make sure VLAN delete is successful before updating VLAN
@@ -3530,10 +3557,33 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
 	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
 	ret = vlan_ops->del_vlan(vsi, &vlan);
 	if (ret)
-		return ret;
+		goto finish;
 
-	set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
-	return 0;
+	/* Remove multicast promisc rule for the removed VLAN ID if
+	 * all-multicast is enabled.
+	 */
+	if (vsi->current_netdev_flags & IFF_ALLMULTI)
+		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+					   ICE_MCAST_VLAN_PROMISC_BITS, vid);
+
+	if (!ice_vsi_has_non_zero_vlans(vsi)) {
+		/* Update look-up type of multicast promisc rule for VLAN 0
+		 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
+		 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
+		 */
+		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
+			ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+						   ICE_MCAST_VLAN_PROMISC_BITS,
+						   0);
+			ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
+						 ICE_MCAST_PROMISC_BITS, 0);
+		}
+	}
+
+finish:
+	clear_bit(ICE_CFG_BUSY, vsi->state);
+
+	return ret;
 }
 
 /**
@@ -5475,16 +5525,19 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
 
 	/* Add filter for new MAC. If filter exists, return success */
 	err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
-	if (err == -EEXIST)
+	if (err == -EEXIST) {
 		/* Although this MAC filter is already present in hardware it's
 		 * possible in some cases (e.g. bonding) that dev_addr was
 		 * modified outside of the driver and needs to be restored back
 		 * to this value.
 		 */
 		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
-	else if (err)
+
+		return 0;
+	} else if (err) {
 		/* error if the new filter addition failed */
 		err = -EADDRNOTAVAIL;
+	}
 
 err_update_filters:
 	if (err) {
@@ -1358,9 +1358,9 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
 				goto error_param;
 			}
 
-			/* Skip queue if not enabled */
 			if (!test_bit(vf_q_id, vf->txq_ena))
-				continue;
+				dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
+					vf_q_id, vsi->vsi_num);
 
 			ice_fill_txq_meta(vsi, ring, &txq_meta);
 
@@ -41,8 +41,10 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
 static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
 {
 	ice_clean_tx_ring(vsi->tx_rings[q_idx]);
-	if (ice_is_xdp_ena_vsi(vsi))
+	if (ice_is_xdp_ena_vsi(vsi)) {
+		synchronize_rcu();
 		ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
+	}
 	ice_clean_rx_ring(vsi->rx_rings[q_idx]);
 }
 
@@ -918,7 +920,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
 	struct ice_vsi *vsi = np->vsi;
 	struct ice_tx_ring *ring;
 
-	if (test_bit(ICE_DOWN, vsi->state))
+	if (test_bit(ICE_VSI_DOWN, vsi->state))
 		return -ENETDOWN;
 
 	if (!ice_is_xdp_ena_vsi(vsi))
@@ -2751,7 +2751,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
 	}
 
 	ret = of_get_mac_address(pnp, ppd.mac_addr);
-	if (ret)
+	if (ret == -EPROBE_DEFER)
 		return ret;
 
 	mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
@@ -28,6 +28,7 @@ config KS8842
 config KS8851
 	tristate "Micrel KS8851 SPI"
 	depends on SPI
+	depends on PTP_1588_CLOCK_OPTIONAL
 	select MII
 	select CRC32
 	select EEPROM_93CX6
@@ -39,6 +40,7 @@ config KS8851
 config KS8851_MLL
 	tristate "Micrel KS8851 MLL"
 	depends on HAS_IOMEM
+	depends on PTP_1588_CLOCK_OPTIONAL
 	select MII
 	select CRC32
 	select EEPROM_93CX6
@@ -2903,11 +2903,9 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
 		status = myri10ge_xmit(curr, dev);
 		if (status != 0) {
 			dev_kfree_skb_any(curr);
-			if (segs != NULL) {
-				curr = segs;
-				segs = next;
+			skb_list_walk_safe(next, curr, next) {
 				curr->next = NULL;
-				dev_kfree_skb_any(segs);
+				dev_kfree_skb_any(curr);
 			}
 			goto drop;
 		}
@@ -489,7 +489,7 @@ struct split_type_defs {
 
 #define STATIC_DEBUG_LINE_DWORDS	9
 
-#define NUM_COMMON_GLOBAL_PARAMS	11
+#define NUM_COMMON_GLOBAL_PARAMS	10
 
 #define MAX_RECURSION_DEPTH		10
 
@@ -748,6 +748,9 @@ qede_build_skb(struct qede_rx_queue *rxq,
 	buf = page_address(bd->data) + bd->page_offset;
 	skb = build_skb(buf, rxq->rx_buf_seg_size);
 
+	if (unlikely(!skb))
+		return NULL;
+
 	skb_reserve(skb, pad);
 	skb_put(skb, len);
 
@@ -786,6 +786,85 @@ void efx_remove_channels(struct efx_nic *efx)
 	kfree(efx->xdp_tx_queues);
 }
 
+static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
+				struct efx_tx_queue *tx_queue)
+{
+	if (xdp_queue_number >= efx->xdp_tx_queue_count)
+		return -EINVAL;
+
+	netif_dbg(efx, drv, efx->net_dev,
+		  "Channel %u TXQ %u is XDP %u, HW %u\n",
+		  tx_queue->channel->channel, tx_queue->label,
+		  xdp_queue_number, tx_queue->queue);
+	efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+	return 0;
+}
+
+static void efx_set_xdp_channels(struct efx_nic *efx)
+{
+	struct efx_tx_queue *tx_queue;
+	struct efx_channel *channel;
+	unsigned int next_queue = 0;
+	int xdp_queue_number = 0;
+	int rc;
+
+	/* We need to mark which channels really have RX and TX
+	 * queues, and adjust the TX queue numbers if we have separate
+	 * RX-only and TX-only channels.
+	 */
+	efx_for_each_channel(channel, efx) {
+		if (channel->channel < efx->tx_channel_offset)
+			continue;
+
+		if (efx_channel_is_xdp_tx(channel)) {
+			efx_for_each_channel_tx_queue(tx_queue, channel) {
+				tx_queue->queue = next_queue++;
+				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
+							  tx_queue);
+				if (rc == 0)
+					xdp_queue_number++;
+			}
+		} else {
+			efx_for_each_channel_tx_queue(tx_queue, channel) {
+				tx_queue->queue = next_queue++;
+				netif_dbg(efx, drv, efx->net_dev,
+					  "Channel %u TXQ %u is HW %u\n",
+					  channel->channel, tx_queue->label,
+					  tx_queue->queue);
+			}
+
+			/* If XDP is borrowing queues from net stack, it must
+			 * use the queue with no csum offload, which is the
+			 * first one of the channel
+			 * (note: tx_queue_by_type is not initialized yet)
+			 */
+			if (efx->xdp_txq_queues_mode ==
+			    EFX_XDP_TX_QUEUES_BORROWED) {
+				tx_queue = &channel->tx_queue[0];
+				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
+							  tx_queue);
+				if (rc == 0)
+					xdp_queue_number++;
+			}
+		}
+	}
+	WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
+		xdp_queue_number != efx->xdp_tx_queue_count);
+	WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
+		xdp_queue_number > efx->xdp_tx_queue_count);
+
+	/* If we have more CPUs than assigned XDP TX queues, assign the already
+	 * existing queues to the exceeding CPUs
+	 */
+	next_queue = 0;
+	while (xdp_queue_number < efx->xdp_tx_queue_count) {
+		tx_queue = efx->xdp_tx_queues[next_queue++];
+		rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+		if (rc == 0)
+			xdp_queue_number++;
+	}
+}
+
 int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 {
 	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
@@ -857,6 +936,7 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 		efx_init_napi_channel(efx->channel[i]);
 	}
 
+	efx_set_xdp_channels(efx);
 out:
 	/* Destroy unused channel structures */
 	for (i = 0; i < efx->n_channels; i++) {
@@ -889,26 +969,9 @@ rollback:
 	goto out;
 }
 
-static inline int
-efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
-		     struct efx_tx_queue *tx_queue)
-{
-	if (xdp_queue_number >= efx->xdp_tx_queue_count)
-		return -EINVAL;
-
-	netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
-		  tx_queue->channel->channel, tx_queue->label,
-		  xdp_queue_number, tx_queue->queue);
-	efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
-	return 0;
-}
-
 int efx_set_channels(struct efx_nic *efx)
 {
-	struct efx_tx_queue *tx_queue;
 	struct efx_channel *channel;
-	unsigned int next_queue = 0;
-	int xdp_queue_number;
 	int rc;
 
 	efx->tx_channel_offset =
@@ -926,61 +989,14 @@ int efx_set_channels(struct efx_nic *efx)
 		return -ENOMEM;
 	}
 
-	/* We need to mark which channels really have RX and TX
-	 * queues, and adjust the TX queue numbers if we have separate
-	 * RX-only and TX-only channels.
-	 */
-	xdp_queue_number = 0;
 	efx_for_each_channel(channel, efx) {
 		if (channel->channel < efx->n_rx_channels)
 			channel->rx_queue.core_index = channel->channel;
 		else
 			channel->rx_queue.core_index = -1;
-
-		if (channel->channel >= efx->tx_channel_offset) {
-			if (efx_channel_is_xdp_tx(channel)) {
-				efx_for_each_channel_tx_queue(tx_queue, channel) {
-					tx_queue->queue = next_queue++;
-					rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
-					if (rc == 0)
-						xdp_queue_number++;
-				}
-			} else {
-				efx_for_each_channel_tx_queue(tx_queue, channel) {
-					tx_queue->queue = next_queue++;
-					netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is HW %u\n",
-						  channel->channel, tx_queue->label,
-						  tx_queue->queue);
-				}
-
-				/* If XDP is borrowing queues from net stack, it must use the queue
-				 * with no csum offload, which is the first one of the channel
-				 * (note: channel->tx_queue_by_type is not initialized yet)
-				 */
-				if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
-					tx_queue = &channel->tx_queue[0];
-					rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
-					if (rc == 0)
-						xdp_queue_number++;
-				}
-			}
-		}
 	}
-	WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
-		xdp_queue_number != efx->xdp_tx_queue_count);
-	WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
-		xdp_queue_number > efx->xdp_tx_queue_count);
 
-	/* If we have more CPUs than assigned XDP TX queues, assign the already
-	 * existing queues to the exceeding CPUs
-	 */
-	next_queue = 0;
-	while (xdp_queue_number < efx->xdp_tx_queue_count) {
-		tx_queue = efx->xdp_tx_queues[next_queue++];
-		rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
-		if (rc == 0)
-			xdp_queue_number++;
-	}
+	efx_set_xdp_channels(efx);
 
 	rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
 	if (rc)
@@ -1124,7 +1140,7 @@ void efx_start_channels(struct efx_nic *efx)
 	struct efx_rx_queue *rx_queue;
 	struct efx_channel *channel;
 
-	efx_for_each_channel(channel, efx) {
+	efx_for_each_channel_rev(channel, efx) {
 		efx_for_each_channel_tx_queue(tx_queue, channel) {
 			efx_init_tx_queue(tx_queue);
 			atomic_inc(&efx->active_queues);
@@ -150,6 +150,9 @@ static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
 	struct efx_nic *efx = rx_queue->efx;
 	int i;
 
+	if (unlikely(!rx_queue->page_ring))
+		return;
+
 	/* Unmap and release the pages in the recycle ring. Remove the ring. */
 	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
 		struct page *page = rx_queue->page_ring[i];
@@ -443,6 +443,9 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
 	if (unlikely(!tx_queue))
 		return -EINVAL;
 
+	if (!tx_queue->initialised)
+		return -EINVAL;
+
 	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
 		HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);
 
@@ -101,6 +101,8 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
 		  "shutting down TX queue %d\n", tx_queue->queue);
 
+	tx_queue->initialised = false;
+
 	if (!tx_queue->buffer)
 		return;
 
@@ -205,7 +205,7 @@ static const struct pci_device_id loongson_dwmac_id_table[] = {
 };
 MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table);
 
-struct pci_driver loongson_dwmac_driver = {
+static struct pci_driver loongson_dwmac_driver = {
 	.name = "dwmac-loongson-pci",
 	.id_table = loongson_dwmac_id_table,
 	.probe = loongson_dwmac_probe,
@@ -431,8 +431,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
 	plat->phylink_node = np;
 
 	/* Get max speed of operation from device tree */
-	if (of_property_read_u32(np, "max-speed", &plat->max_speed))
-		plat->max_speed = -1;
+	of_property_read_u32(np, "max-speed", &plat->max_speed);
 
 	plat->bus_id = of_alias_get_id(np, "ethernet");
 	if (plat->bus_id < 0)
@@ -433,8 +433,6 @@ struct axienet_local {
 	struct net_device *ndev;
 	struct device *dev;
 
-	struct device_node *phy_node;
-
 	struct phylink *phylink;
 	struct phylink_config phylink_config;
 
@@ -2064,25 +2064,33 @@ static int axienet_probe(struct platform_device *pdev)
 	if (ret)
 		goto cleanup_clk;
 
-	lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
-	if (lp->phy_node) {
-		ret = axienet_mdio_setup(lp);
-		if (ret)
-			dev_warn(&pdev->dev,
-				 "error registering MDIO bus: %d\n", ret);
-	}
+	ret = axienet_mdio_setup(lp);
+	if (ret)
+		dev_warn(&pdev->dev,
+			 "error registering MDIO bus: %d\n", ret);
+
 	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
 	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
-		if (!lp->phy_node) {
-			dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n");
+		np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
+		if (!np) {
+			/* Deprecated: Always use "pcs-handle" for pcs_phy.
+			 * Falling back to "phy-handle" here is only for
+			 * backward compatibility with old device trees.
+			 */
+			np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+		}
+		if (!np) {
+			dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
 			ret = -EINVAL;
 			goto cleanup_mdio;
 		}
-		lp->pcs_phy = of_mdio_find_device(lp->phy_node);
+		lp->pcs_phy = of_mdio_find_device(np);
 		if (!lp->pcs_phy) {
 			ret = -EPROBE_DEFER;
+			of_node_put(np);
 			goto cleanup_mdio;
 		}
+		of_node_put(np);
 		lp->pcs.ops = &axienet_pcs_ops;
 		lp->pcs.poll = true;
 	}
@@ -2125,8 +2133,6 @@ cleanup_mdio:
 		put_device(&lp->pcs_phy->dev);
 	if (lp->mii_bus)
 		axienet_mdio_teardown(lp);
-	of_node_put(lp->phy_node);
-
 cleanup_clk:
 	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
 	clk_disable_unprepare(lp->axi_clk);
@@ -2155,9 +2161,6 @@ static int axienet_remove(struct platform_device *pdev)
 	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
 	clk_disable_unprepare(lp->axi_clk);
 
-	of_node_put(lp->phy_node);
-	lp->phy_node = NULL;
-
 	free_netdev(ndev);
 
 	return 0;
@@ -553,7 +553,7 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev,
 	hdr->source_slave = ((llsrc << 1) & 0xff) | 0x01;
 	mhdr->ver = 0x01;
 
-	return 0;
+	return sizeof(struct mctp_i2c_hdr);
 }
 
 static int mctp_i2c_tx_thread(void *data)
@@ -107,6 +107,9 @@ static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum)
 	u32 val;
 	int ret;
 
+	if (regnum & MII_ADDR_C45)
+		return -EOPNOTSUPP;
+
 	ret = mscc_miim_wait_pending(bus);
 	if (ret)
 		goto out;
@@ -150,6 +153,9 @@ static int mscc_miim_write(struct mii_bus *bus, int mii_id,
 	struct mscc_miim_dev *miim = bus->priv;
 	int ret;
 
+	if (regnum & MII_ADDR_C45)
+		return -EOPNOTSUPP;
+
 	ret = mscc_miim_wait_pending(bus);
 	if (ret < 0)
 		goto out;
@@ -99,15 +99,6 @@
 #define PTP_TIMESTAMP_EN_PDREQ_			BIT(2)
 #define PTP_TIMESTAMP_EN_PDRES_			BIT(3)
 
-#define PTP_RX_LATENCY_1000			0x0224
-#define PTP_TX_LATENCY_1000			0x0225
-
-#define PTP_RX_LATENCY_100			0x0222
-#define PTP_TX_LATENCY_100			0x0223
-
-#define PTP_RX_LATENCY_10			0x0220
-#define PTP_TX_LATENCY_10			0x0221
-
 #define PTP_TX_PARSE_L2_ADDR_EN		0x0284
 #define PTP_RX_PARSE_L2_ADDR_EN		0x0244
 
@@ -268,15 +259,6 @@ struct lan8814_ptp_rx_ts {
 	u16 seq_id;
 };
 
-struct kszphy_latencies {
-	u16 rx_10;
-	u16 tx_10;
-	u16 rx_100;
-	u16 tx_100;
-	u16 rx_1000;
-	u16 tx_1000;
-};
-
 struct kszphy_ptp_priv {
 	struct mii_timestamper mii_ts;
 	struct phy_device *phydev;
@@ -296,7 +278,6 @@ struct kszphy_ptp_priv {
 
 struct kszphy_priv {
 	struct kszphy_ptp_priv ptp_priv;
-	struct kszphy_latencies latencies;
 	const struct kszphy_type *type;
 	int led_mode;
 	bool rmii_ref_clk_sel;
@@ -304,14 +285,6 @@ struct kszphy_priv {
 	u64 stats[ARRAY_SIZE(kszphy_hw_stats)];
 };
 
-static struct kszphy_latencies lan8814_latencies = {
-	.rx_10		= 0x22AA,
-	.tx_10		= 0x2E4A,
-	.rx_100		= 0x092A,
-	.tx_100		= 0x02C1,
-	.rx_1000	= 0x01AD,
-	.tx_1000	= 0x00C9,
-};
 static const struct kszphy_type ksz8021_type = {
 	.led_mode_reg		= MII_KSZPHY_CTRL_2,
 	.has_broadcast_disable	= true,
@@ -2618,55 +2591,6 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
 	return 0;
 }
 
-static int lan8814_read_status(struct phy_device *phydev)
-{
-	struct kszphy_priv *priv = phydev->priv;
-	struct kszphy_latencies *latencies = &priv->latencies;
-	int err;
-	int regval;
-
-	err = genphy_read_status(phydev);
-	if (err)
-		return err;
-
-	switch (phydev->speed) {
-	case SPEED_1000:
-		lanphy_write_page_reg(phydev, 5, PTP_RX_LATENCY_1000,
-				      latencies->rx_1000);
-		lanphy_write_page_reg(phydev, 5, PTP_TX_LATENCY_1000,
-				      latencies->tx_1000);
-		break;
-	case SPEED_100:
-		lanphy_write_page_reg(phydev, 5, PTP_RX_LATENCY_100,
-				      latencies->rx_100);
-		lanphy_write_page_reg(phydev, 5, PTP_TX_LATENCY_100,
-				      latencies->tx_100);
-		break;
-	case SPEED_10:
-		lanphy_write_page_reg(phydev, 5, PTP_RX_LATENCY_10,
-				      latencies->rx_10);
-		lanphy_write_page_reg(phydev, 5, PTP_TX_LATENCY_10,
-				      latencies->tx_10);
-		break;
-	default:
-		break;
-	}
-
-	/* Make sure the PHY is not broken. Read idle error count,
-	 * and reset the PHY if it is maxed out.
-	 */
-	regval = phy_read(phydev, MII_STAT1000);
-	if ((regval & 0xFF) == 0xFF) {
-		phy_init_hw(phydev);
-		phydev->link = 0;
-		if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
-			phydev->drv->config_intr(phydev);
-		return genphy_config_aneg(phydev);
-	}
-
-	return 0;
-}
-
 static int lan8814_config_init(struct phy_device *phydev)
 {
 	int val;
@@ -2690,30 +2614,8 @@ static int lan8814_config_init(struct phy_device *phydev)
 	return 0;
 }
 
-static void lan8814_parse_latency(struct phy_device *phydev)
-{
-	const struct device_node *np = phydev->mdio.dev.of_node;
-	struct kszphy_priv *priv = phydev->priv;
-	struct kszphy_latencies *latency = &priv->latencies;
-	u32 val;
-
-	if (!of_property_read_u32(np, "lan8814,latency_rx_10", &val))
-		latency->rx_10 = val;
-	if (!of_property_read_u32(np, "lan8814,latency_tx_10", &val))
-		latency->tx_10 = val;
-	if (!of_property_read_u32(np, "lan8814,latency_rx_100", &val))
-		latency->rx_100 = val;
-	if (!of_property_read_u32(np, "lan8814,latency_tx_100", &val))
-		latency->tx_100 = val;
-	if (!of_property_read_u32(np, "lan8814,latency_rx_1000", &val))
-		latency->rx_1000 = val;
-	if (!of_property_read_u32(np, "lan8814,latency_tx_1000", &val))
-		latency->tx_1000 = val;
-}
-
 static int lan8814_probe(struct phy_device *phydev)
 {
-	const struct device_node *np = phydev->mdio.dev.of_node;
 	struct kszphy_priv *priv;
 	u16 addr;
 	int err;
@@ -2724,13 +2626,10 @@ static int lan8814_probe(struct phy_device *phydev)
 
 	priv->led_mode = -1;
 
-	priv->latencies = lan8814_latencies;
-
 	phydev->priv = priv;
 
 	if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) ||
-	    !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING) ||
-	    of_property_read_bool(np, "lan8814,ignore-ts"))
+	    !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING))
 		return 0;
 
 	/* Strap-in value for PHY address, below register read gives starting
@@ -2746,7 +2645,6 @@ static int lan8814_probe(struct phy_device *phydev)
 		return err;
 	}
 
-	lan8814_parse_latency(phydev);
 	lan8814_ptp_init(phydev);
 
 	return 0;
@@ -2928,7 +2826,7 @@ static struct phy_driver ksphy_driver[] = {
 	.config_init	= lan8814_config_init,
 	.probe		= lan8814_probe,
 	.soft_reset	= genphy_soft_reset,
-	.read_status	= lan8814_read_status,
+	.read_status	= ksz9031_read_status,
 	.get_sset_count	= kszphy_get_sset_count,
 	.get_strings	= kszphy_get_strings,
 	.get_stats	= kszphy_get_stats,
@@ -469,7 +469,7 @@ static void sl_tx_timeout(struct net_device *dev, unsigned int txqueue)
 	spin_lock(&sl->lock);
 
 	if (netif_queue_stopped(dev)) {
-		if (!netif_running(dev))
+		if (!netif_running(dev) || !sl->tty)
 			goto out;
 
 		/* May be we must check transmitter timeout here ?
@@ -1102,10 +1102,15 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 	if (start_of_descs != desc_offset)
 		goto err;
 
-	/* self check desc_offset from header*/
-	if (desc_offset >= skb_len)
+	/* self check desc_offset from header and make sure that the
+	 * bounds of the metadata array are inside the SKB
+	 */
+	if (pkt_count * 2 + desc_offset >= skb_len)
 		goto err;
 
+	/* Packets must not overlap the metadata array */
+	skb_trim(skb, desc_offset);
+
 	if (pkt_count == 0)
 		goto err;
 
@@ -1265,6 +1265,7 @@ static int vrf_prepare_mac_header(struct sk_buff *skb,
 	eth = (struct ethhdr *)skb->data;
 
 	skb_reset_mac_header(skb);
+	skb_reset_mac_len(skb);
 
 	/* we set the ethernet destination and the source addresses to the
 	 * address of the VRF device.
@@ -1294,9 +1295,9 @@ static int vrf_prepare_mac_header(struct sk_buff *skb,
  */
 static int vrf_add_mac_header_if_unset(struct sk_buff *skb,
 				       struct net_device *vrf_dev,
-				       u16 proto)
+				       u16 proto, struct net_device *orig_dev)
 {
-	if (skb_mac_header_was_set(skb))
+	if (skb_mac_header_was_set(skb) && dev_has_header(orig_dev))
 		return 0;
 
 	return vrf_prepare_mac_header(skb, vrf_dev, proto);
@@ -1402,6 +1403,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
 
 	/* if packet is NDISC then keep the ingress interface */
 	if (!is_ndisc) {
+		struct net_device *orig_dev = skb->dev;
+
 		vrf_rx_stats(vrf_dev, skb->len);
 		skb->dev = vrf_dev;
 		skb->skb_iif = vrf_dev->ifindex;
@@ -1410,7 +1413,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
 			int err;
 
 			err = vrf_add_mac_header_if_unset(skb, vrf_dev,
-							  ETH_P_IPV6);
+							  ETH_P_IPV6,
+							  orig_dev);
 			if (likely(!err)) {
 				skb_push(skb, skb->mac_len);
 				dev_queue_xmit_nit(skb, vrf_dev);
@@ -1440,6 +1444,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
 static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
 				  struct sk_buff *skb)
 {
+	struct net_device *orig_dev = skb->dev;
+
 	skb->dev = vrf_dev;
 	skb->skb_iif = vrf_dev->ifindex;
 	IPCB(skb)->flags |= IPSKB_L3SLAVE;
@@ -1460,7 +1466,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
 	if (!list_empty(&vrf_dev->ptype_all)) {
 		int err;
 
-		err = vrf_add_mac_header_if_unset(skb, vrf_dev, ETH_P_IP);
+		err = vrf_add_mac_header_if_unset(skb, vrf_dev, ETH_P_IP,
+						  orig_dev);
 		if (likely(!err)) {
 			skb_push(skb, skb->mac_len);
 			dev_queue_xmit_nit(skb, vrf_dev);
@@ -3407,6 +3407,15 @@ static int hv_pci_probe(struct hv_device *hdev,
 	hbus->bridge->domain_nr = dom;
 #ifdef CONFIG_X86
 	hbus->sysdata.domain = dom;
+#elif defined(CONFIG_ARM64)
+	/*
+	 * Set the PCI bus parent to be the corresponding VMbus
+	 * device. Then the VMbus device will be assigned as the
+	 * ACPI companion in pcibios_root_bridge_prepare() and
+	 * pci_dma_configure() will propagate device coherence
+	 * information to devices created on the bus.
+	 */
+	hbus->sysdata.parent = hdev->device.parent;
 #endif
 
 	hbus->hdev = hdev;
@@ -163,6 +163,7 @@ struct mlx5_vdpa_net {
 	u32 cur_num_vqs;
 	struct notifier_block nb;
 	struct vdpa_callback config_cb;
+	struct mlx5_vdpa_wq_ent cvq_ent;
 };
 
 static void free_resources(struct mlx5_vdpa_net *ndev);
@@ -1658,6 +1659,12 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
 	mvdev = wqent->mvdev;
 	ndev = to_mlx5_vdpa_ndev(mvdev);
 	cvq = &mvdev->cvq;
+
+	mutex_lock(&ndev->reslock);
+
+	if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
+		goto out;
+
 	if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
 		goto out;
 
@@ -1696,9 +1703,13 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
 
 		if (vringh_need_notify_iotlb(&cvq->vring))
 			vringh_notify(&cvq->vring);
+
+		queue_work(mvdev->wq, &wqent->work);
+		break;
 	}
 
 out:
-	kfree(wqent);
+	mutex_unlock(&ndev->reslock);
 }
 
 static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
@@ -1706,7 +1717,6 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
 	struct mlx5_vdpa_virtqueue *mvq;
-	struct mlx5_vdpa_wq_ent *wqent;
 
 	if (!is_index_valid(mvdev, idx))
 		return;
@@ -1715,13 +1725,7 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
 		if (!mvdev->wq || !mvdev->cvq.ready)
 			return;
 
-		wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
-		if (!wqent)
-			return;
-
-		wqent->mvdev = mvdev;
-		INIT_WORK(&wqent->work, mlx5_cvq_kick_handler);
-		queue_work(mvdev->wq, &wqent->work);
+		queue_work(mvdev->wq, &ndev->cvq_ent.work);
 		return;
 	}
 
@@ -2180,7 +2184,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
 		goto err_mr;
 
 	if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
-		return 0;
+		goto err_mr;
 
 	restore_channels_info(ndev);
 	err = setup_driver(mvdev);
@@ -2195,12 +2199,14 @@ err_mr:
 	return err;
 }
 
+/* reslock must be held for this function */
 static int setup_driver(struct mlx5_vdpa_dev *mvdev)
{
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
 	int err;
 
-	mutex_lock(&ndev->reslock);
+	WARN_ON(!mutex_is_locked(&ndev->reslock));
+
 	if (ndev->setup) {
 		mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n");
 		err = 0;
@@ -2230,7 +2236,6 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
 		goto err_fwd;
 	}
 	ndev->setup = true;
-	mutex_unlock(&ndev->reslock);
 
 	return 0;
 
@@ -2241,23 +2246,23 @@ err_tir:
 err_rqt:
 	teardown_virtqueues(ndev);
 out:
-	mutex_unlock(&ndev->reslock);
 	return err;
 }
 
+/* reslock must be held for this function */
 static void teardown_driver(struct mlx5_vdpa_net *ndev)
 {
-	mutex_lock(&ndev->reslock);
+	WARN_ON(!mutex_is_locked(&ndev->reslock));
+
 	if (!ndev->setup)
-		goto out;
+		return;
 
 	remove_fwd_to_tir(ndev);
 	destroy_tir(ndev);
 	destroy_rqt(ndev);
 	teardown_virtqueues(ndev);
 	ndev->setup = false;
-out:
-	mutex_unlock(&ndev->reslock);
 }
 
 static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
@@ -2278,6 +2283,8 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
 
 	print_status(mvdev, status, true);
 
+	mutex_lock(&ndev->reslock);
+
 	if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
 		if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
 			err = setup_driver(mvdev);
@@ -2287,16 +2294,19 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
 			}
 		} else {
 			mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n");
-			return;
+			goto err_clear;
 		}
 	}
 
 	ndev->mvdev.status = status;
+	mutex_unlock(&ndev->reslock);
 	return;
 
 err_setup:
 	mlx5_vdpa_destroy_mr(&ndev->mvdev);
 	ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
+err_clear:
+	mutex_unlock(&ndev->reslock);
 }
 
 static int mlx5_vdpa_reset(struct vdpa_device *vdev)
@@ -2306,6 +2316,8 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
 
 	print_status(mvdev, 0, true);
 	mlx5_vdpa_info(mvdev, "performing device reset\n");
+
+	mutex_lock(&ndev->reslock);
 	teardown_driver(ndev);
 	clear_vqs_ready(ndev);
 	mlx5_vdpa_destroy_mr(&ndev->mvdev);
@@ -2318,6 +2330,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
 		if (mlx5_vdpa_create_mr(mvdev, NULL))
 			mlx5_vdpa_warn(mvdev, "create MR failed\n");
 	}
+	mutex_unlock(&ndev->reslock);
 
 	return 0;
 }
@@ -2353,19 +2366,24 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
 static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
 {
 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
 	bool change_map;
 	int err;
 
+	mutex_lock(&ndev->reslock);
+
 	err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
 	if (err) {
 		mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
-		return err;
+		goto err;
 	}
 
 	if (change_map)
-		return mlx5_vdpa_change_map(mvdev, iotlb);
+		err = mlx5_vdpa_change_map(mvdev, iotlb);
 
-	return 0;
+err:
+	mutex_unlock(&ndev->reslock);
+	return err;
 }
 
 static void mlx5_vdpa_free(struct vdpa_device *vdev)
|
static void mlx5_vdpa_free(struct vdpa_device *vdev)
|
||||||
|
@ -2740,6 +2758,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
|
||||||
if (err)
|
if (err)
|
||||||
goto err_mr;
|
goto err_mr;
|
||||||
|
|
||||||
|
ndev->cvq_ent.mvdev = mvdev;
|
||||||
|
INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler);
|
||||||
mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq");
|
mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq");
|
||||||
if (!mvdev->wq) {
|
if (!mvdev->wq) {
|
||||||
err = -ENOMEM;
|
err = -ENOMEM;
|
||||||
@@ -526,9 +526,8 @@ int virtio_device_restore(struct virtio_device *dev)
 		goto err;
 	}
 
-	/* If restore didn't do it, mark device DRIVER_OK ourselves. */
-	if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
-		virtio_device_ready(dev);
+	/* Finally, tell the device we're all set */
+	virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
 
 	virtio_config_enable(dev);
 
@@ -118,7 +118,7 @@ struct btrfs_bio_ctrl {
  */
 struct extent_changeset {
 	/* How many bytes are set/cleared in this operation */
-	unsigned int bytes_changed;
+	u64 bytes_changed;
 
 	/* Changed ranges */
 	struct ulist range_changed;
@@ -2957,8 +2957,9 @@ out:
 	return ret;
 }
 
-static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
+static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
 {
+	struct inode *inode = file_inode(file);
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct extent_state *cached_state = NULL;
@@ -2990,6 +2991,10 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 		goto out_only_mutex;
 	}
 
+	ret = file_modified(file);
+	if (ret)
+		goto out_only_mutex;
+
 	lockstart = round_up(offset, btrfs_inode_sectorsize(BTRFS_I(inode)));
 	lockend = round_down(offset + len,
 			     btrfs_inode_sectorsize(BTRFS_I(inode))) - 1;
@@ -3430,7 +3435,7 @@ static long btrfs_fallocate(struct file *file, int mode,
 		return -EOPNOTSUPP;
 
 	if (mode & FALLOC_FL_PUNCH_HOLE)
-		return btrfs_punch_hole(inode, offset, len);
+		return btrfs_punch_hole(file, offset, len);
 
 	/*
 	 * Only trigger disk allocation, don't trigger qgroup reserve
@@ -3452,6 +3457,10 @@ static long btrfs_fallocate(struct file *file, int mode,
 		goto out;
 	}
 
+	ret = file_modified(file);
+	if (ret)
+		goto out;
+
 	/*
 	 * TODO: Move these two operations after we have checked
 	 * accurate reserved space, or fallocate can still fail but
@@ -1128,7 +1128,6 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
 	int ret = 0;
 
 	if (btrfs_is_free_space_inode(inode)) {
-		WARN_ON_ONCE(1);
 		ret = -EINVAL;
 		goto out_unlock;
 	}
@@ -4488,6 +4487,13 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
 			   dest->root_key.objectid);
 		return -EPERM;
 	}
+	if (atomic_read(&dest->nr_swapfiles)) {
+		spin_unlock(&dest->root_item_lock);
+		btrfs_warn(fs_info,
+			   "attempt to delete subvolume %llu with active swapfile",
+			   root->root_key.objectid);
+		return -EPERM;
+	}
 	root_flags = btrfs_root_flags(&dest->root_item);
 	btrfs_set_root_flags(&dest->root_item,
 			     root_flags | BTRFS_ROOT_SUBVOL_DEAD);
@@ -11107,8 +11113,23 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
 	 * set. We use this counter to prevent snapshots. We must increment it
 	 * before walking the extents because we don't want a concurrent
 	 * snapshot to run after we've already checked the extents.
+	 *
+	 * It is possible that subvolume is marked for deletion but still not
+	 * removed yet. To prevent this race, we check the root status before
+	 * activating the swapfile.
 	 */
+	spin_lock(&root->root_item_lock);
+	if (btrfs_root_dead(root)) {
+		spin_unlock(&root->root_item_lock);
+
+		btrfs_exclop_finish(fs_info);
+		btrfs_warn(fs_info,
+		"cannot activate swapfile because subvolume %llu is being deleted",
+			root->root_key.objectid);
+		return -EPERM;
+	}
 	atomic_inc(&root->nr_swapfiles);
+	spin_unlock(&root->root_item_lock);
 
 	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
 
@@ -1239,7 +1239,7 @@ static u32 get_extent_max_capacity(const struct extent_map *em)
 }
 
 static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
-				     bool locked)
+				     u32 extent_thresh, u64 newer_than, bool locked)
 {
 	struct extent_map *next;
 	bool ret = false;
@@ -1249,11 +1249,12 @@ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
 		return false;
 
 	/*
-	 * We want to check if the next extent can be merged with the current
-	 * one, which can be an extent created in a past generation, so we pass
-	 * a minimum generation of 0 to defrag_lookup_extent().
+	 * Here we need to pass @newer_then when checking the next extent, or
+	 * we will hit a case we mark current extent for defrag, but the next
+	 * one will not be a target.
+	 * This will just cause extra IO without really reducing the fragments.
 	 */
-	next = defrag_lookup_extent(inode, em->start + em->len, 0, locked);
+	next = defrag_lookup_extent(inode, em->start + em->len, newer_than, locked);
 	/* No more em or hole */
 	if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
 		goto out;
@@ -1265,6 +1266,13 @@ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
 	 */
 	if (next->len >= get_extent_max_capacity(em))
 		goto out;
+	/* Skip older extent */
+	if (next->generation < newer_than)
+		goto out;
+	/* Also check extent size */
+	if (next->len >= extent_thresh)
+		goto out;
+
 	ret = true;
 out:
 	free_extent_map(next);
@@ -1470,7 +1478,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
 			goto next;
 
 		next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em,
-							  locked);
+							  extent_thresh, newer_than, locked);
 		if (!next_mergeable) {
 			struct defrag_target_range *last;
 
@@ -1896,23 +1896,18 @@ static void update_dev_time(const char *device_path)
 	path_put(&path);
 }
 
-static int btrfs_rm_dev_item(struct btrfs_device *device)
+static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
+			     struct btrfs_device *device)
 {
 	struct btrfs_root *root = device->fs_info->chunk_root;
 	int ret;
 	struct btrfs_path *path;
 	struct btrfs_key key;
-	struct btrfs_trans_handle *trans;
 
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
 
-	trans = btrfs_start_transaction(root, 0);
-	if (IS_ERR(trans)) {
-		btrfs_free_path(path);
-		return PTR_ERR(trans);
-	}
 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
 	key.type = BTRFS_DEV_ITEM_KEY;
 	key.offset = device->devid;
@@ -1923,21 +1918,12 @@ static int btrfs_rm_dev_item(struct btrfs_device *device)
 	if (ret) {
 		if (ret > 0)
 			ret = -ENOENT;
-		btrfs_abort_transaction(trans, ret);
-		btrfs_end_transaction(trans);
 		goto out;
 	}
 
 	ret = btrfs_del_item(trans, root, path);
-	if (ret) {
-		btrfs_abort_transaction(trans, ret);
-		btrfs_end_transaction(trans);
-	}
-
 out:
 	btrfs_free_path(path);
-	if (!ret)
-		ret = btrfs_commit_transaction(trans);
 	return ret;
 }
 
@@ -2078,6 +2064,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
 		    struct btrfs_dev_lookup_args *args,
 		    struct block_device **bdev, fmode_t *mode)
 {
+	struct btrfs_trans_handle *trans;
 	struct btrfs_device *device;
 	struct btrfs_fs_devices *cur_devices;
 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
@@ -2098,7 +2085,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
 
 	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
 	if (ret)
-		goto out;
+		return ret;
 
 	device = btrfs_find_device(fs_info->fs_devices, args);
 	if (!device) {
@@ -2106,27 +2093,22 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
 			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
 		else
 			ret = -ENOENT;
-		goto out;
+		return ret;
 	}
 
 	if (btrfs_pinned_by_swapfile(fs_info, device)) {
 		btrfs_warn_in_rcu(fs_info,
 		  "cannot remove device %s (devid %llu) due to active swapfile",
 				  rcu_str_deref(device->name), device->devid);
-		ret = -ETXTBSY;
-		goto out;
+		return -ETXTBSY;
 	}
 
-	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
-		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
-		goto out;
-	}
+	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
+		return BTRFS_ERROR_DEV_TGT_REPLACE;
 
 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
-	    fs_info->fs_devices->rw_devices == 1) {
-		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
-		goto out;
-	}
+	    fs_info->fs_devices->rw_devices == 1)
+		return BTRFS_ERROR_DEV_ONLY_WRITABLE;
 
 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
 		mutex_lock(&fs_info->chunk_mutex);
@@ -2139,14 +2121,22 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
 	if (ret)
 		goto error_undo;
 
-	/*
-	 * TODO: the superblock still includes this device in its num_devices
-	 * counter although write_all_supers() is not locked out. This
-	 * could give a filesystem state which requires a degraded mount.
-	 */
-	ret = btrfs_rm_dev_item(device);
-	if (ret)
+	trans = btrfs_start_transaction(fs_info->chunk_root, 0);
+	if (IS_ERR(trans)) {
+		ret = PTR_ERR(trans);
 		goto error_undo;
+	}
+
+	ret = btrfs_rm_dev_item(trans, device);
+	if (ret) {
+		/* Any error in dev item removal is critical */
+		btrfs_crit(fs_info,
+			   "failed to remove device item for devid %llu: %d",
+			   device->devid, ret);
+		btrfs_abort_transaction(trans, ret);
+		btrfs_end_transaction(trans);
+		return ret;
+	}
 
 	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
 	btrfs_scrub_cancel_dev(device);
@@ -2229,7 +2219,8 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
 		free_fs_devices(cur_devices);
 	}
 
-out:
+	ret = btrfs_commit_transaction(trans);
+
 	return ret;
 
 error_undo:
@@ -2240,7 +2231,7 @@ error_undo:
 		device->fs_devices->rw_devices++;
 		mutex_unlock(&fs_info->chunk_mutex);
 	}
-	goto out;
+	return ret;
 }
 
 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
@@ -1801,7 +1801,6 @@ struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
 
 	map = em->map_lookup;
 	/* We only support single profile for now */
-	ASSERT(map->num_stripes == 1);
 	device = map->stripes[0].dev;
 
 	free_extent_map(em);
@@ -1976,18 +1975,16 @@ int btrfs_zone_finish(struct btrfs_block_group *block_group)
 
 bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
 {
+	struct btrfs_fs_info *fs_info = fs_devices->fs_info;
 	struct btrfs_device *device;
 	bool ret = false;
 
-	if (!btrfs_is_zoned(fs_devices->fs_info))
+	if (!btrfs_is_zoned(fs_info))
 		return true;
 
-	/* Non-single profiles are not supported yet */
-	ASSERT((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0);
-
 	/* Check if there is a device with active zones left */
-	mutex_lock(&fs_devices->device_list_mutex);
-	list_for_each_entry(device, &fs_devices->devices, dev_list) {
+	mutex_lock(&fs_info->chunk_mutex);
+	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
 		struct btrfs_zoned_device_info *zinfo = device->zone_info;
 
 		if (!device->bdev)
@@ -1999,7 +1996,7 @@ bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
 			break;
 		}
 	}
-	mutex_unlock(&fs_devices->device_list_mutex);
+	mutex_unlock(&fs_info->chunk_mutex);
 
 	return ret;
 }
@@ -269,6 +269,7 @@ bool hv_isolation_type_snp(void);
 u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
 void hyperv_cleanup(void);
 bool hv_query_ext_cap(u64 cap_query);
+void hv_setup_dma_ops(struct device *dev, bool coherent);
 void *hv_map_memory(void *addr, unsigned long size);
 void hv_unmap_memory(void *addr);
 #else /* CONFIG_HYPERV */
@@ -570,9 +570,11 @@ static inline u32 type_flag(u32 type)
 	return type & ~BPF_BASE_TYPE_MASK;
 }
 
+/* only use after check_attach_btf_id() */
 static inline enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog)
 {
-	return prog->aux->dst_prog ? prog->aux->dst_prog->type : prog->type;
+	return prog->type == BPF_PROG_TYPE_EXT ?
+	       prog->aux->dst_prog->type : prog->type;
 }
 
 #endif /* _LINUX_BPF_VERIFIER_H */
@@ -23,8 +23,6 @@ struct virtio_shm_region {
  *	any of @get/@set, @get_status/@set_status, or @get_features/
  *	@finalize_features are NOT safe to be called from an atomic
  *	context.
- * @enable_cbs: enable the callbacks
- *	vdev: the virtio_device
  * @get: read the value of a configuration field
  *	vdev: the virtio_device
  *	offset: the offset of the configuration field
@@ -78,7 +76,6 @@ struct virtio_shm_region {
  */
 typedef void vq_callback_t(struct virtqueue *);
 struct virtio_config_ops {
-	void (*enable_cbs)(struct virtio_device *vdev);
 	void (*get)(struct virtio_device *vdev, unsigned offset,
 		    void *buf, unsigned len);
 	void (*set)(struct virtio_device *vdev, unsigned offset,
@@ -233,9 +230,6 @@ void virtio_device_ready(struct virtio_device *dev)
 {
 	unsigned status = dev->config->get_status(dev);
 
-	if (dev->config->enable_cbs)
-		dev->config->enable_cbs(dev);
-
 	BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
 	dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
 }
@@ -36,8 +36,6 @@ struct mctp_hdr {
 #define MCTP_HDR_TAG_SHIFT	0
 #define MCTP_HDR_TAG_MASK	GENMASK(2, 0)
 
-#define MCTP_HEADER_MAXLEN	4
-
 #define MCTP_INITIAL_DEFAULT_NET	1
 
 static inline bool mctp_address_unicast(mctp_eid_t eid)
@@ -2349,11 +2349,11 @@ kprobe_multi_link_handler(struct fprobe *fp, unsigned long entry_ip,
 }
 
 static int
-kprobe_multi_resolve_syms(const void *usyms, u32 cnt,
+kprobe_multi_resolve_syms(const void __user *usyms, u32 cnt,
 			  unsigned long *addrs)
 {
 	unsigned long addr, size;
-	const char **syms;
+	const char __user **syms;
 	int err = -ENOMEM;
 	unsigned int i;
 	char *func;
@@ -65,7 +65,7 @@ static void rethook_free_rcu(struct rcu_head *head)
  */
 void rethook_free(struct rethook *rh)
 {
-	rcu_assign_pointer(rh->handler, NULL);
+	WRITE_ONCE(rh->handler, NULL);
 
 	call_rcu(&rh->rcu, rethook_free_rcu);
 }
@@ -7016,24 +7016,33 @@ BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len
 	if (!th->ack || th->rst || th->syn)
 		return -ENOENT;
 
+	if (unlikely(iph_len < sizeof(struct iphdr)))
+		return -EINVAL;
+
 	if (tcp_synq_no_recent_overflow(sk))
 		return -ENOENT;
 
 	cookie = ntohl(th->ack_seq) - 1;
 
-	switch (sk->sk_family) {
-	case AF_INET:
-		if (unlikely(iph_len < sizeof(struct iphdr)))
+	/* Both struct iphdr and struct ipv6hdr have the version field at the
+	 * same offset so we can cast to the shorter header (struct iphdr).
+	 */
+	switch (((struct iphdr *)iph)->version) {
+	case 4:
+		if (sk->sk_family == AF_INET6 && ipv6_only_sock(sk))
 			return -EINVAL;
 
 		ret = __cookie_v4_check((struct iphdr *)iph, th, cookie);
 		break;
 
 #if IS_BUILTIN(CONFIG_IPV6)
-	case AF_INET6:
+	case 6:
 		if (unlikely(iph_len < sizeof(struct ipv6hdr)))
 			return -EINVAL;
 
+		if (sk->sk_family != AF_INET6)
+			return -EINVAL;
+
 		ret = __cookie_v6_check((struct ipv6hdr *)iph, th, cookie);
 		break;
 #endif /* CONFIG_IPV6 */
@@ -5276,11 +5276,18 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
 	if (skb_cloned(to))
 		return false;
 
-	/* The page pool signature of struct page will eventually figure out
-	 * which pages can be recycled or not but for now let's prohibit slab
-	 * allocated and page_pool allocated SKBs from being coalesced.
+	/* In general, avoid mixing slab allocated and page_pool allocated
+	 * pages within the same SKB. However when @to is not pp_recycle and
+	 * @from is cloned, we can transition frag pages from page_pool to
+	 * reference counted.
+	 *
+	 * On the other hand, don't allow coalescing two pp_recycle SKBs if
+	 * @from is cloned, in case the SKB is using page_pool fragment
+	 * references (PP_FLAG_PAGE_FRAG). Since we only take full page
+	 * references for cloned SKBs at the moment that would result in
+	 * inconsistent reference counts.
 	 */
-	if (to->pp_recycle != from->pp_recycle)
+	if (to->pp_recycle != (from->pp_recycle && !skb_cloned(from)))
 		return false;
 
 	if (len <= skb_tailroom(to)) {
@@ -335,11 +335,24 @@ static const struct attribute_group dsa_group = {
 	.attrs = dsa_slave_attrs,
 };
 
+static void dsa_master_reset_mtu(struct net_device *dev)
+{
+	int err;
+
+	err = dev_set_mtu(dev, ETH_DATA_LEN);
+	if (err)
+		netdev_dbg(dev,
+			   "Unable to reset MTU to exclude DSA overheads\n");
+}
+
 int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
 {
+	const struct dsa_device_ops *tag_ops = cpu_dp->tag_ops;
 	struct dsa_switch *ds = cpu_dp->ds;
 	struct device_link *consumer_link;
-	int ret;
+	int mtu, ret;
 
+	mtu = ETH_DATA_LEN + dsa_tag_protocol_overhead(tag_ops);
+
 	/* The DSA master must use SET_NETDEV_DEV for this to work. */
 	consumer_link = device_link_add(ds->dev, dev->dev.parent,
@@ -349,6 +362,15 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
 			   "Failed to create a device link to DSA switch %s\n",
 			   dev_name(ds->dev));
 
+	/* The switch driver may not implement ->port_change_mtu(), case in
+	 * which dsa_slave_change_mtu() will not update the master MTU either,
+	 * so we need to do that here.
+	 */
+	ret = dev_set_mtu(dev, mtu);
+	if (ret)
+		netdev_warn(dev, "error %d setting MTU to %d to include DSA overhead\n",
+			    ret, mtu);
+
 	/* If we use a tagging format that doesn't have an ethertype
 	 * field, make sure that all packets from this point on get
 	 * sent to the tag format's receive function.
@@ -384,6 +406,7 @@ void dsa_master_teardown(struct net_device *dev)
 	sysfs_remove_group(&dev->dev.kobj, &dsa_group);
 	dsa_netdev_ops_set(dev, NULL);
 	dsa_master_ethtool_teardown(dev);
+	dsa_master_reset_mtu(dev);
 	dsa_master_set_promiscuity(dev, -1);
 
 	dev->dsa_ptr = NULL;
@@ -889,8 +889,13 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
 	}
 
 	if (cfg->fc_oif || cfg->fc_gw_family) {
-		struct fib_nh *nh = fib_info_nh(fi, 0);
+		struct fib_nh *nh;
 
+		/* cannot match on nexthop object attributes */
+		if (fi->nh)
+			return 1;
+
+		nh = fib_info_nh(fi, 0);
 		if (cfg->fc_encap) {
 			if (fib_encap_match(net, cfg->fc_encap_type,
 					    cfg->fc_encap, nh, cfg, extack))
@@ -1653,7 +1653,6 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval,
 	mifi_t mifi;
 	struct net *net = sock_net(sk);
 	struct mr_table *mrt;
-	bool do_wrmifwhole;
 
 	if (sk->sk_type != SOCK_RAW ||
 	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
@@ -1761,6 +1760,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval,
 #ifdef CONFIG_IPV6_PIMSM_V2
 	case MRT6_PIM:
 	{
+		bool do_wrmifwhole;
 		int v;
 
 		if (optlen != sizeof(v))
|
@ -4484,7 +4484,7 @@ static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
|
||||||
struct inet6_dev *idev;
|
struct inet6_dev *idev;
|
||||||
int type;
|
int type;
|
||||||
|
|
||||||
if (netif_is_l3_master(skb->dev) &&
|
if (netif_is_l3_master(skb->dev) ||
|
||||||
dst->dev == net->loopback_dev)
|
dst->dev == net->loopback_dev)
|
||||||
idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
|
idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
|
||||||
else
|
else
|
||||||
@@ -93,13 +93,13 @@ out_release:
 static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 {
 	DECLARE_SOCKADDR(struct sockaddr_mctp *, addr, msg->msg_name);
-	const int hlen = MCTP_HEADER_MAXLEN + sizeof(struct mctp_hdr);
 	int rc, addrlen = msg->msg_namelen;
 	struct sock *sk = sock->sk;
 	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
 	struct mctp_skb_cb *cb;
 	struct mctp_route *rt;
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
+	int hlen;
 
 	if (addr) {
 		const u8 tagbits = MCTP_TAG_MASK | MCTP_TAG_OWNER |
@@ -129,6 +129,34 @@ static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 		if (addr->smctp_network == MCTP_NET_ANY)
 			addr->smctp_network = mctp_default_net(sock_net(sk));
 
+		/* direct addressing */
+		if (msk->addr_ext && addrlen >= sizeof(struct sockaddr_mctp_ext)) {
+			DECLARE_SOCKADDR(struct sockaddr_mctp_ext *,
+					 extaddr, msg->msg_name);
+			struct net_device *dev;
+
+			rc = -EINVAL;
+			rcu_read_lock();
+			dev = dev_get_by_index_rcu(sock_net(sk), extaddr->smctp_ifindex);
+			/* check for correct halen */
+			if (dev && extaddr->smctp_halen == dev->addr_len) {
+				hlen = LL_RESERVED_SPACE(dev) + sizeof(struct mctp_hdr);
+				rc = 0;
+			}
+			rcu_read_unlock();
+			if (rc)
+				goto err_free;
+			rt = NULL;
+		} else {
+			rt = mctp_route_lookup(sock_net(sk), addr->smctp_network,
+					       addr->smctp_addr.s_addr);
+			if (!rt) {
+				rc = -EHOSTUNREACH;
+				goto err_free;
+			}
+			hlen = LL_RESERVED_SPACE(rt->dev->dev) + sizeof(struct mctp_hdr);
+		}
+
 		skb = sock_alloc_send_skb(sk, hlen + 1 + len,
 					  msg->msg_flags & MSG_DONTWAIT, &rc);
 		if (!skb)
@@ -147,8 +175,8 @@ static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 	cb = __mctp_cb(skb);
 	cb->net = addr->smctp_network;
 
-	/* direct addressing */
-	if (msk->addr_ext && addrlen >= sizeof(struct sockaddr_mctp_ext)) {
+	if (!rt) {
+		/* fill extended address in cb */
 		DECLARE_SOCKADDR(struct sockaddr_mctp_ext *,
 				 extaddr, msg->msg_name);
 
@@ -159,17 +187,9 @@ static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 		}
 
 		cb->ifindex = extaddr->smctp_ifindex;
+		/* smctp_halen is checked above */
 		cb->halen = extaddr->smctp_halen;
 		memcpy(cb->haddr, extaddr->smctp_haddr, cb->halen);
-
-		rt = NULL;
-	} else {
-		rt = mctp_route_lookup(sock_net(sk), addr->smctp_network,
-				       addr->smctp_addr.s_addr);
-		if (!rt) {
-			rc = -EHOSTUNREACH;
-			goto err_free;
-		}
 	}
 
 	rc = mctp_local_output(sk, rt, skb, addr->smctp_addr.s_addr,
@@ -503,6 +503,11 @@ static int mctp_route_output(struct mctp_route *route, struct sk_buff *skb)
 
 	if (cb->ifindex) {
 		/* direct route; use the hwaddr we stashed in sendmsg */
+		if (cb->halen != skb->dev->addr_len) {
+			/* sanity check, sendmsg should have already caught this */
+			kfree_skb(skb);
+			return -EMSGSIZE;
+		}
 		daddr = cb->haddr;
 	} else {
 		/* If lookup fails let the device handle daddr==NULL */
@@ -512,7 +517,7 @@ static int mctp_route_output(struct mctp_route *route, struct sk_buff *skb)
 
 	rc = dev_hard_header(skb, skb->dev, ntohs(skb->protocol),
 			     daddr, skb->dev->dev_addr, skb->len);
-	if (rc) {
+	if (rc < 0) {
 		kfree_skb(skb);
 		return -EHOSTUNREACH;
 	}
@@ -756,7 +761,7 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
 {
 	const unsigned int hlen = sizeof(struct mctp_hdr);
 	struct mctp_hdr *hdr, *hdr2;
-	unsigned int pos, size;
+	unsigned int pos, size, headroom;
 	struct sk_buff *skb2;
 	int rc;
 	u8 seq;
@@ -770,6 +775,9 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
 		return -EMSGSIZE;
 	}
 
+	/* keep same headroom as the original skb */
+	headroom = skb_headroom(skb);
+
 	/* we've got the header */
 	skb_pull(skb, hlen);
 
@@ -777,7 +785,7 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
 		/* size of message payload */
 		size = min(mtu - hlen, skb->len - pos);
 
-		skb2 = alloc_skb(MCTP_HEADER_MAXLEN + hlen + size, GFP_KERNEL);
+		skb2 = alloc_skb(headroom + hlen + size, GFP_KERNEL);
 		if (!skb2) {
 			rc = -ENOMEM;
 			break;
@@ -793,7 +801,7 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
 			skb_set_owner_w(skb2, skb->sk);
 
 		/* establish packet */
-		skb_reserve(skb2, MCTP_HEADER_MAXLEN);
+		skb_reserve(skb2, headroom);
 		skb_reset_network_header(skb2);
 		skb_put(skb2, hlen + size);
 		skb2->transport_header = skb2->network_header + hlen;
@@ -5526,7 +5526,7 @@ int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
 	int err, i, k;
 
 	for (i = 0; i < set->num_exprs; i++) {
-		expr = kzalloc(set->exprs[i]->ops->size, GFP_KERNEL);
+		expr = kzalloc(set->exprs[i]->ops->size, GFP_KERNEL_ACCOUNT);
 		if (!expr)
 			goto err_expr;
 
@@ -290,7 +290,7 @@ static bool nft_bitwise_reduce(struct nft_regs_track *track,
 	if (!track->regs[priv->sreg].selector)
 		return false;
 
-	bitwise = nft_expr_priv(expr);
+	bitwise = nft_expr_priv(track->regs[priv->dreg].selector);
 	if (track->regs[priv->sreg].selector == track->regs[priv->dreg].selector &&
 	    track->regs[priv->sreg].num_reg == 0 &&
 	    track->regs[priv->dreg].bitwise &&
@@ -442,7 +442,7 @@ static bool nft_bitwise_fast_reduce(struct nft_regs_track *track,
 	if (!track->regs[priv->sreg].selector)
 		return false;
 
-	bitwise = nft_expr_priv(expr);
+	bitwise = nft_expr_priv(track->regs[priv->dreg].selector);
 	if (track->regs[priv->sreg].selector == track->regs[priv->dreg].selector &&
 	    track->regs[priv->dreg].bitwise &&
 	    track->regs[priv->dreg].bitwise->ops == expr->ops &&
@@ -77,7 +77,7 @@ static int nft_connlimit_do_init(const struct nft_ctx *ctx,
 		invert = true;
 	}
 
-	priv->list = kmalloc(sizeof(*priv->list), GFP_KERNEL);
+	priv->list = kmalloc(sizeof(*priv->list), GFP_KERNEL_ACCOUNT);
 	if (!priv->list)
 		return -ENOMEM;
 
@@ -62,7 +62,7 @@ static int nft_counter_do_init(const struct nlattr * const tb[],
 	struct nft_counter __percpu *cpu_stats;
 	struct nft_counter *this_cpu;
 
-	cpu_stats = alloc_percpu(struct nft_counter);
+	cpu_stats = alloc_percpu_gfp(struct nft_counter, GFP_KERNEL_ACCOUNT);
 	if (cpu_stats == NULL)
 		return -ENOMEM;
 
@@ -30,7 +30,7 @@ static int nft_last_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 	u64 last_jiffies;
 	int err;
 
-	last = kzalloc(sizeof(*last), GFP_KERNEL);
+	last = kzalloc(sizeof(*last), GFP_KERNEL_ACCOUNT);
 	if (!last)
 		return -ENOMEM;
 
@@ -90,7 +90,7 @@ static int nft_limit_init(struct nft_limit_priv *priv,
 					 priv->rate);
 	}
 
-	priv->limit = kmalloc(sizeof(*priv->limit), GFP_KERNEL);
+	priv->limit = kmalloc(sizeof(*priv->limit), GFP_KERNEL_ACCOUNT);
 	if (!priv->limit)
 		return -ENOMEM;
 
@@ -90,7 +90,7 @@ static int nft_quota_do_init(const struct nlattr * const tb[],
 			return -EOPNOTSUPP;
 	}
 
-	priv->consumed = kmalloc(sizeof(*priv->consumed), GFP_KERNEL);
+	priv->consumed = kmalloc(sizeof(*priv->consumed), GFP_KERNEL_ACCOUNT);
 	if (!priv->consumed)
 		return -ENOMEM;
 
@@ -1051,7 +1051,7 @@ static int clone(struct datapath *dp, struct sk_buff *skb,
 	int rem = nla_len(attr);
 	bool dont_clone_flow_key;
 
-	/* The first action is always 'OVS_CLONE_ATTR_ARG'. */
+	/* The first action is always 'OVS_CLONE_ATTR_EXEC'. */
 	clone_arg = nla_data(attr);
 	dont_clone_flow_key = nla_get_u32(clone_arg);
 	actions = nla_next(clone_arg, &rem);
@@ -2317,6 +2317,62 @@ static struct sw_flow_actions *nla_alloc_flow_actions(int size)
 	return sfa;
 }
 
+static void ovs_nla_free_nested_actions(const struct nlattr *actions, int len);
+
+static void ovs_nla_free_check_pkt_len_action(const struct nlattr *action)
+{
+	const struct nlattr *a;
+	int rem;
+
+	nla_for_each_nested(a, action, rem) {
+		switch (nla_type(a)) {
+		case OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL:
+		case OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER:
+			ovs_nla_free_nested_actions(nla_data(a), nla_len(a));
+			break;
+		}
+	}
+}
+
+static void ovs_nla_free_clone_action(const struct nlattr *action)
+{
+	const struct nlattr *a = nla_data(action);
+	int rem = nla_len(action);
+
+	switch (nla_type(a)) {
+	case OVS_CLONE_ATTR_EXEC:
+		/* The real list of actions follows this attribute. */
+		a = nla_next(a, &rem);
+		ovs_nla_free_nested_actions(a, rem);
+		break;
+	}
+}
+
+static void ovs_nla_free_dec_ttl_action(const struct nlattr *action)
+{
+	const struct nlattr *a = nla_data(action);
+
+	switch (nla_type(a)) {
+	case OVS_DEC_TTL_ATTR_ACTION:
+		ovs_nla_free_nested_actions(nla_data(a), nla_len(a));
+		break;
+	}
+}
+
+static void ovs_nla_free_sample_action(const struct nlattr *action)
+{
+	const struct nlattr *a = nla_data(action);
+	int rem = nla_len(action);
+
+	switch (nla_type(a)) {
+	case OVS_SAMPLE_ATTR_ARG:
+		/* The real list of actions follows this attribute. */
+		a = nla_next(a, &rem);
+		ovs_nla_free_nested_actions(a, rem);
+		break;
+	}
+}
+
 static void ovs_nla_free_set_action(const struct nlattr *a)
 {
 	const struct nlattr *ovs_key = nla_data(a);
@@ -2330,25 +2386,54 @@ static void ovs_nla_free_set_action(const struct nlattr *a)
 	}
 }
 
-void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
+static void ovs_nla_free_nested_actions(const struct nlattr *actions, int len)
 {
 	const struct nlattr *a;
 	int rem;
 
-	if (!sf_acts)
+	/* Whenever new actions are added, the need to update this
+	 * function should be considered.
+	 */
+	BUILD_BUG_ON(OVS_ACTION_ATTR_MAX != 23);
+
+	if (!actions)
 		return;
 
-	nla_for_each_attr(a, sf_acts->actions, sf_acts->actions_len, rem) {
+	nla_for_each_attr(a, actions, len, rem) {
 		switch (nla_type(a)) {
-		case OVS_ACTION_ATTR_SET:
-			ovs_nla_free_set_action(a);
+		case OVS_ACTION_ATTR_CHECK_PKT_LEN:
+			ovs_nla_free_check_pkt_len_action(a);
+			break;
+
+		case OVS_ACTION_ATTR_CLONE:
+			ovs_nla_free_clone_action(a);
 			break;
 
 		case OVS_ACTION_ATTR_CT:
 			ovs_ct_free_action(a);
 			break;
+
+		case OVS_ACTION_ATTR_DEC_TTL:
+			ovs_nla_free_dec_ttl_action(a);
+			break;
+
+		case OVS_ACTION_ATTR_SAMPLE:
+			ovs_nla_free_sample_action(a);
+			break;
+
+		case OVS_ACTION_ATTR_SET:
+			ovs_nla_free_set_action(a);
+			break;
 		}
 	}
+}
+
+void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
+{
+	if (!sf_acts)
+		return;
 
+	ovs_nla_free_nested_actions(sf_acts->actions, sf_acts->actions_len);
 	kfree(sf_acts);
 }
 
@@ -3458,7 +3543,9 @@ static int clone_action_to_attr(const struct nlattr *attr,
 	if (!start)
 		return -EMSGSIZE;
 
-	err = ovs_nla_put_actions(nla_data(attr), rem, skb);
+	/* Skipping the OVS_CLONE_ATTR_EXEC that is always the first attribute. */
+	attr = nla_next(nla_data(attr), &rem);
+	err = ovs_nla_put_actions(attr, rem, skb);
 
 	if (err)
 		nla_nest_cancel(skb, start);
@@ -113,8 +113,8 @@ static __net_exit void rxrpc_exit_net(struct net *net)
 	struct rxrpc_net *rxnet = rxrpc_net(net);
 
 	rxnet->live = false;
-	del_timer_sync(&rxnet->peer_keepalive_timer);
 	cancel_work_sync(&rxnet->peer_keepalive_work);
+	del_timer_sync(&rxnet->peer_keepalive_timer);
 	rxrpc_destroy_all_calls(rxnet);
 	rxrpc_destroy_all_connections(rxnet);
 	rxrpc_destroy_all_peers(rxnet);
@@ -914,6 +914,7 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 				ctx->asoc->base.sk->sk_err = -error;
 				return;
 			}
+			ctx->asoc->stats.octrlchunks++;
 			break;
 
 		case SCTP_CID_ABORT:
@@ -938,7 +939,10 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 
 		case SCTP_CID_HEARTBEAT:
 			if (chunk->pmtu_probe) {
-				sctp_packet_singleton(ctx->transport, chunk, ctx->gfp);
+				error = sctp_packet_singleton(ctx->transport,
+							      chunk, ctx->gfp);
+				if (!error)
+					ctx->asoc->stats.octrlchunks++;
 				break;
 			}
 			fallthrough;
@@ -1496,7 +1496,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
 	if (prot->version == TLS_1_3_VERSION ||
 	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305)
 		memcpy(iv + iv_offset, tls_ctx->rx.iv,
-		       crypto_aead_ivsize(ctx->aead_recv));
+		       prot->iv_size + prot->salt_size);
 	else
 		memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
 
@@ -828,8 +828,10 @@ codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped)
 		s->map_cnt = %zu; \n\
 		s->map_skel_sz = sizeof(*s->maps); \n\
 		s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
-		if (!s->maps) \n\
+		if (!s->maps) { \n\
+			err = -ENOMEM; \n\
 			goto err; \n\
+		} \n\
 	",
 		map_cnt
 	);
@@ -870,8 +872,10 @@ codegen_progs_skeleton(struct bpf_object *obj, size_t prog_cnt, bool populate_li
 		s->prog_cnt = %zu; \n\
 		s->prog_skel_sz = sizeof(*s->progs); \n\
 		s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
-		if (!s->progs) \n\
+		if (!s->progs) { \n\
+			err = -ENOMEM; \n\
 			goto err; \n\
+		} \n\
 	",
 		prog_cnt
 	);
@@ -1182,10 +1186,13 @@ static int do_skeleton(int argc, char **argv)
 	%1$s__create_skeleton(struct %1$s *obj) \n\
 	{ \n\
 		struct bpf_object_skeleton *s; \n\
+		int err; \n\
 	\n\
 		s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
-		if (!s) \n\
+		if (!s) { \n\
+			err = -ENOMEM; \n\
 			goto err; \n\
+		} \n\
 	\n\
 		s->sz = sizeof(*s); \n\
 		s->name = \"%1$s\"; \n\
@@ -1206,7 +1213,7 @@ static int do_skeleton(int argc, char **argv)
 		return 0; \n\
 	err: \n\
 		bpf_object__destroy_skeleton(s); \n\
-		return -ENOMEM; \n\
+		return err; \n\
 	} \n\
 	\n\
 	static inline const void *%2$s__elf_bytes(size_t *sz) \n\
@@ -1466,12 +1473,12 @@ static int do_subskeleton(int argc, char **argv)
 	\n\
 		obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
 		if (!obj) { \n\
-			errno = ENOMEM; \n\
+			err = -ENOMEM; \n\
 			goto err; \n\
 		} \n\
 		s = (struct bpf_object_subskeleton *)calloc(1, sizeof(*s));\n\
 		if (!s) { \n\
-			errno = ENOMEM; \n\
+			err = -ENOMEM; \n\
 			goto err; \n\
 		} \n\
 		s->sz = sizeof(*s); \n\
@@ -1483,7 +1490,7 @@ static int do_subskeleton(int argc, char **argv)
 		s->var_cnt = %2$d; \n\
 		s->vars = (struct bpf_var_skeleton *)calloc(%2$d, sizeof(*s->vars));\n\
 		if (!s->vars) { \n\
-			errno = ENOMEM; \n\
+			err = -ENOMEM; \n\
 			goto err; \n\
 		} \n\
 	",
@@ -1538,6 +1545,7 @@ static int do_subskeleton(int argc, char **argv)
 		return obj; \n\
 	err: \n\
 		%1$s__destroy(obj); \n\
+		errno = -err; \n\
 		return NULL; \n\
 	} \n\
 	\n\
@ -2,6 +2,7 @@
|
||||||
/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
|
/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
|
||||||
#include <test_progs.h>
|
#include <test_progs.h>
|
||||||
#include "dummy_st_ops.skel.h"
|
#include "dummy_st_ops.skel.h"
|
||||||
|
#include "trace_dummy_st_ops.skel.h"
|
||||||
|
|
||||||
/* Need to keep consistent with definition in include/linux/bpf.h */
|
/* Need to keep consistent with definition in include/linux/bpf.h */
|
||||||
struct bpf_dummy_ops_state {
|
struct bpf_dummy_ops_state {
|
||||||
|
@ -56,6 +57,7 @@ static void test_dummy_init_ptr_arg(void)
|
||||||
.ctx_in = args,
|
.ctx_in = args,
|
||||||
.ctx_size_in = sizeof(args),
|
.ctx_size_in = sizeof(args),
|
||||||
);
|
);
|
||||||
|
struct trace_dummy_st_ops *trace_skel;
|
||||||
struct dummy_st_ops *skel;
|
struct dummy_st_ops *skel;
|
||||||
int fd, err;
|
int fd, err;
|
||||||
|
|
||||||
|
@ -64,12 +66,33 @@ static void test_dummy_init_ptr_arg(void)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
fd = bpf_program__fd(skel->progs.test_1);
|
fd = bpf_program__fd(skel->progs.test_1);
|
||||||
|
|
||||||
|
trace_skel = trace_dummy_st_ops__open();
|
||||||
|
if (!ASSERT_OK_PTR(trace_skel, "trace_dummy_st_ops__open"))
|
||||||
|
goto done;
|
||||||
|
|
||||||
|
err = bpf_program__set_attach_target(trace_skel->progs.fentry_test_1,
|
||||||
|
fd, "test_1");
|
||||||
|
if (!ASSERT_OK(err, "set_attach_target(fentry_test_1)"))
|
||||||
|
goto done;
|
||||||
|
|
||||||
|
err = trace_dummy_st_ops__load(trace_skel);
|
||||||
|
if (!ASSERT_OK(err, "load(trace_skel)"))
|
||||||
|
goto done;
|
||||||
|
|
||||||
|
err = trace_dummy_st_ops__attach(trace_skel);
|
||||||
|
if (!ASSERT_OK(err, "attach(trace_skel)"))
|
||||||
|
goto done;
|
||||||
|
|
||||||
err = bpf_prog_test_run_opts(fd, &attr);
|
err = bpf_prog_test_run_opts(fd, &attr);
|
||||||
ASSERT_OK(err, "test_run");
|
ASSERT_OK(err, "test_run");
|
||||||
ASSERT_EQ(in_state.val, 0x5a, "test_ptr_ret");
|
ASSERT_EQ(in_state.val, 0x5a, "test_ptr_ret");
|
||||||
ASSERT_EQ(attr.retval, exp_retval, "test_ret");
|
ASSERT_EQ(attr.retval, exp_retval, "test_ret");
|
||||||
|
ASSERT_EQ(trace_skel->bss->val, exp_retval, "fentry_val");
|
||||||
|
|
||||||
|
done:
|
||||||
dummy_st_ops__destroy(skel);
|
dummy_st_ops__destroy(skel);
|
||||||
|
trace_dummy_st_ops__destroy(trace_skel);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void test_dummy_multiple_args(void)
|
static void test_dummy_multiple_args(void)
|
||||||
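The test relies on libbpf's runtime attach-target selection: an fentry program's target is normally resolved from its SEC("fentry/...") name, but bpf_program__set_attach_target() can redirect it to a function inside another, already loaded BPF program, provided it is called after open and before load, since the verifier resolves the target BTF ID at load time. A minimal sketch of the pattern, with a hypothetical skeleton named tracer:

/* Sketch only: "tracer" is a hypothetical skeleton with an fentry
 * program fentry_test_1; target_prog_fd is the fd of the already
 * loaded BPF program whose "test_1" function we want to trace. */
static int attach_tracer(int target_prog_fd)
{
	struct tracer *skel;
	int err;

	skel = tracer__open();	/* open only; targets not yet resolved */
	if (!skel)
		return -errno;

	err = bpf_program__set_attach_target(skel->progs.fentry_test_1,
					     target_prog_fd, "test_1");
	if (err)
		goto out;

	err = tracer__load(skel);	/* target BTF ID resolved here */
	if (!err)
		err = tracer__attach(skel);
out:
	if (err)
		tracer__destroy(skel);
	return err;
}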
@@ -367,7 +367,7 @@ static inline int check_array_of_maps(void)
 
 	VERIFY(check_default(&array_of_maps->map, map));
 	inner_map = bpf_map_lookup_elem(array_of_maps, &key);
-	VERIFY(inner_map != 0);
+	VERIFY(inner_map != NULL);
 	VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES);
 
 	return 1;
@@ -394,7 +394,7 @@ static inline int check_hash_of_maps(void)
 
 	VERIFY(check_default(&hash_of_maps->map, map));
 	inner_map = bpf_map_lookup_elem(hash_of_maps, &key);
-	VERIFY(inner_map != 0);
+	VERIFY(inner_map != NULL);
 	VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES);
 
 	return 1;
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+int val = 0;
+
+SEC("fentry/test_1")
+int BPF_PROG(fentry_test_1, __u64 *st_ops_ctx)
+{
+	__u64 state;
+
+	/* Read the traced st_ops arg1 which is a pointer */
+	bpf_probe_read_kernel(&state, sizeof(__u64), (void *)st_ops_ctx);
+	/* Read state->val */
+	bpf_probe_read_kernel(&val, sizeof(__u32), (void *)state);
+
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
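The new program above works around the fact that an fentry context is an array of raw __u64 argument slots: slot 0 holds the traced function's first argument, here a pointer to the dummy ops state, so one bpf_probe_read_kernel() fetches the pointer and a second reads val through it. With the state layout declared, the same logic can be written with explicit types; a sketch, assuming the struct bpf_dummy_ops_state definition from the test above and the same includes, global val, and license as the file above:

/* Sketch: the two probe-reads with explicit types instead of raw
 * __u64/__u32 sizes; behaviour matches fentry_test_1 above. */
struct bpf_dummy_ops_state {
	int val;
};

SEC("fentry/test_1")
int BPF_PROG(fentry_test_1_typed, __u64 *st_ops_ctx)
{
	struct bpf_dummy_ops_state *state;

	/* slot 0 of the ctx array holds the first argument */
	bpf_probe_read_kernel(&state, sizeof(state), &st_ops_ctx[0]);
	/* then read state->val into the global */
	bpf_probe_read_kernel(&val, sizeof(val), &state->val);
	return 0;
}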
@@ -18,8 +18,9 @@
 #include "bpf_rlimit.h"
 #include "cgroup_helpers.h"
 
-static int start_server(const struct sockaddr *addr, socklen_t len)
+static int start_server(const struct sockaddr *addr, socklen_t len, bool dual)
 {
+	int mode = !dual;
 	int fd;
 
 	fd = socket(addr->sa_family, SOCK_STREAM, 0);
@@ -28,6 +29,14 @@ static int start_server(const struct sockaddr *addr, socklen_t len)
 		goto out;
 	}
 
+	if (addr->sa_family == AF_INET6) {
+		if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, (char *)&mode,
+			       sizeof(mode)) == -1) {
+			log_err("Failed to set the dual-stack mode");
+			goto close_out;
+		}
+	}
+
 	if (bind(fd, addr, len) == -1) {
 		log_err("Failed to bind server socket");
 		goto close_out;
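The new block makes start_server() optionally dual-stack: for AF_INET6 sockets, IPV6_V6ONLY controls whether a listener bound to the any-address also accepts IPv4 connections (mode is !dual, so dual=true clears the flag). A self-contained sketch of the same idea, independent of the test's helpers:

/* Minimal sketch: dual-stack TCP listener on an ephemeral port.
 * Standalone illustration; does not use the test's start_server(). */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int dual_stack_listener(void)
{
	struct sockaddr_in6 a6;
	int zero = 0;	/* IPV6_V6ONLY = 0: accept both v4 and v6 */
	int fd;

	fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (fd < 0)
		return -1;
	if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &zero, sizeof(zero)))
		goto err;

	memset(&a6, 0, sizeof(a6));
	a6.sin6_family = AF_INET6;
	a6.sin6_addr = in6addr_any;	/* v4 peers appear as ::ffff:a.b.c.d */
	a6.sin6_port = 0;		/* kernel picks the port */

	if (bind(fd, (struct sockaddr *)&a6, sizeof(a6)) || listen(fd, 1))
		goto err;
	return fd;
err:
	close(fd);
	return -1;
}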
@@ -47,24 +56,17 @@ out:
 	return fd;
 }
 
-static int connect_to_server(int server_fd)
+static int connect_to_server(const struct sockaddr *addr, socklen_t len)
 {
-	struct sockaddr_storage addr;
-	socklen_t len = sizeof(addr);
 	int fd = -1;
 
-	if (getsockname(server_fd, (struct sockaddr *)&addr, &len)) {
-		log_err("Failed to get server addr");
-		goto out;
-	}
-
-	fd = socket(addr.ss_family, SOCK_STREAM, 0);
+	fd = socket(addr->sa_family, SOCK_STREAM, 0);
 	if (fd == -1) {
 		log_err("Failed to create client socket");
 		goto out;
 	}
 
-	if (connect(fd, (const struct sockaddr *)&addr, len) == -1) {
+	if (connect(fd, (const struct sockaddr *)addr, len) == -1) {
 		log_err("Fail to connect to server");
 		goto close_out;
 	}
@@ -116,7 +118,8 @@ err:
 	return map_fd;
 }
 
-static int run_test(int server_fd, int results_fd, bool xdp)
+static int run_test(int server_fd, int results_fd, bool xdp,
+		    const struct sockaddr *addr, socklen_t len)
 {
 	int client = -1, srv_client = -1;
 	int ret = 0;
@@ -142,7 +145,7 @@ static int run_test(int server_fd, int results_fd, bool xdp)
 		goto err;
 	}
 
-	client = connect_to_server(server_fd);
+	client = connect_to_server(addr, len);
 	if (client == -1)
 		goto err;
 
@@ -199,12 +202,30 @@ out:
 	return ret;
 }
 
+static bool get_port(int server_fd, in_port_t *port)
+{
+	struct sockaddr_in addr;
+	socklen_t len = sizeof(addr);
+
+	if (getsockname(server_fd, (struct sockaddr *)&addr, &len)) {
+		log_err("Failed to get server addr");
+		return false;
+	}
+
+	/* sin_port and sin6_port are located at the same offset. */
+	*port = addr.sin_port;
+	return true;
+}
+
 int main(int argc, char **argv)
 {
 	struct sockaddr_in addr4;
 	struct sockaddr_in6 addr6;
+	struct sockaddr_in addr4dual;
+	struct sockaddr_in6 addr6dual;
 	int server = -1;
 	int server_v6 = -1;
+	int server_dual = -1;
 	int results = -1;
 	int err = 0;
 	bool xdp;
@@ -224,25 +245,43 @@ int main(int argc, char **argv)
 	addr4.sin_family = AF_INET;
 	addr4.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
 	addr4.sin_port = 0;
+	memcpy(&addr4dual, &addr4, sizeof(addr4dual));
 
 	memset(&addr6, 0, sizeof(addr6));
 	addr6.sin6_family = AF_INET6;
 	addr6.sin6_addr = in6addr_loopback;
 	addr6.sin6_port = 0;
 
-	server = start_server((const struct sockaddr *)&addr4, sizeof(addr4));
-	if (server == -1)
+	memset(&addr6dual, 0, sizeof(addr6dual));
+	addr6dual.sin6_family = AF_INET6;
+	addr6dual.sin6_addr = in6addr_any;
+	addr6dual.sin6_port = 0;
+
+	server = start_server((const struct sockaddr *)&addr4, sizeof(addr4),
+			      false);
+	if (server == -1 || !get_port(server, &addr4.sin_port))
 		goto err;
 
 	server_v6 = start_server((const struct sockaddr *)&addr6,
-				 sizeof(addr6));
-	if (server_v6 == -1)
+				 sizeof(addr6), false);
+	if (server_v6 == -1 || !get_port(server_v6, &addr6.sin6_port))
 		goto err;
 
-	if (run_test(server, results, xdp))
+	server_dual = start_server((const struct sockaddr *)&addr6dual,
+				   sizeof(addr6dual), true);
+	if (server_dual == -1 || !get_port(server_dual, &addr4dual.sin_port))
 		goto err;
 
-	if (run_test(server_v6, results, xdp))
+	if (run_test(server, results, xdp,
+		     (const struct sockaddr *)&addr4, sizeof(addr4)))
+		goto err;
+
+	if (run_test(server_v6, results, xdp,
+		     (const struct sockaddr *)&addr6, sizeof(addr6)))
+		goto err;
+
+	if (run_test(server_dual, results, xdp,
+		     (const struct sockaddr *)&addr4dual, sizeof(addr4dual)))
 		goto err;
 
 	printf("ok\n");
@@ -252,6 +291,7 @@ err:
 out:
 	close(server);
 	close(server_v6);
+	close(server_dual);
 	close(results);
 	return err;
 }
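Two assumptions hold this together: get_port() reads the port through a struct sockaddr_in regardless of family, which is safe because sin_port and sin6_port sit at the same offset (both directly follow the 16-bit family field), and the IPv4 run_test() against server_dual works because the v4 client connection reaches the AF_INET6 listener as a v4-mapped address. The offset claim can be pinned down at compile time; a small sketch:

/* Sketch: compile-time check of the assumption behind get_port(). */
#include <netinet/in.h>
#include <stddef.h>

_Static_assert(offsetof(struct sockaddr_in, sin_port) ==
	       offsetof(struct sockaddr_in6, sin6_port),
	       "sin_port and sin6_port must share an offset");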
@@ -1208,6 +1208,20 @@ ipv4_fcnal()
 	set +e
 	check_nexthop "dev veth1" ""
 	log_test $? 0 "Nexthops removed on admin down"
+
+	# nexthop route delete warning: route add with nhid and delete
+	# using device
+	run_cmd "$IP li set dev veth1 up"
+	run_cmd "$IP nexthop add id 12 via 172.16.1.3 dev veth1"
+	out1=`dmesg | grep "WARNING:.*fib_nh_match.*" | wc -l`
+	run_cmd "$IP route add 172.16.101.1/32 nhid 12"
+	run_cmd "$IP route delete 172.16.101.1/32 dev veth1"
+	out2=`dmesg | grep "WARNING:.*fib_nh_match.*" | wc -l`
+	[ $out1 -eq $out2 ]
+	rc=$?
+	log_test $rc 0 "Delete nexthop route warning"
+	run_cmd "$IP route delete 172.16.101.1/32 nhid 12"
+	run_cmd "$IP nexthop del id 12"
 }
 
 ipv4_grp_fcnal()
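The added shell test targets the fib_nh_match() warning path: it counts matching WARNING lines in dmesg, installs a route by nexthop ID, deletes it by device instead of by nhid, and passes only if the count is unchanged, so a kernel that starts warning on this delete pattern again will fail the log_test comparison.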