Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Highlights:

   1) Support TX_RING in AF_PACKET TPACKET_V3 mode, from Sowmini
      Varadhan.

   2) Simplify classifier state on sk_buff in order to shrink it a bit.
      From Willem de Bruijn.

   3) Introduce SIPHASH and its usage for secure sequence numbers and
      syncookies. From Jason A. Donenfeld.

   4) Reduce CPU usage for ICMP replies we are going to limit or
      suppress, from Jesper Dangaard Brouer.

   5) Introduce Shared Memory Communications socket layer, from Ursula
      Braun.

   6) Add RACK loss detection and allow it to actually trigger fast
      recovery instead of just assisting after other algorithms have
      triggered it. From Yuchung Cheng.

   7) Add xmit_more and BQL support to mvneta driver, from Simon Guinot.

   8) skb_cow_data avoidance in esp4 and esp6, from Steffen Klassert.

   9) Export MPLS packet stats via netlink, from Robert Shearman.

  10) Significantly improve inet port bind conflict handling, especially
      when an application is restarted and changes its setting of
      reuseport. From Josef Bacik.

  11) Implement TX batching in vhost_net, from Jason Wang.

  12) Extend the dummy device so that VF (virtual function) features,
      such as configuration, can be more easily tested. From Phil
      Sutter.

  13) Avoid two atomic ops per page on x86 in bnx2x driver, from Eric
      Dumazet.

  14) Add new bpf MAP, implementing a longest prefix match trie. From
      Daniel Mack.

  15) Packet sample offloading support in mlxsw driver, from Yotam Gigi.

  16) Add new aquantia driver, from David VomLehn.

  17) Add bpf tracepoints, from Daniel Borkmann.

  18) Add support for port mirroring to b53 and bcm_sf2 drivers, from
      Florian Fainelli.

  19) Remove custom busy polling in many drivers; it has been done in
      core networking since 4.5. From Eric Dumazet.

  20) Support XDP adjust_head in virtio_net, from John Fastabend.

  21) Fix several major holes in neighbour entry confirmation, from
      Julian Anastasov.

  22) Add XDP support to bnxt_en driver, from Michael Chan.

  23) VXLAN offloads for enic driver, from Govindarajulu Varadarajan.

  24) Add IPVTAP driver (IP-VLAN based tap driver) from Sainath Grandhi.

  25) Support GRO in IPSEC protocols, from Steffen Klassert"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1764 commits)
  Revert "ath10k: Search SMBIOS for OEM board file extension"
  net: socket: fix recvmmsg not returning error from sock_error
  bnxt_en: use eth_hw_addr_random()
  bpf: fix unlocking of jited image when module ronx not set
  arch: add ARCH_HAS_SET_MEMORY config
  net: napi_watchdog() can use napi_schedule_irqoff()
  tcp: Revert "tcp: tcp_probe: use spin_lock_bh()"
  net/hsr: use eth_hw_addr_random()
  net: mvpp2: enable building on 64-bit platforms
  net: mvpp2: switch to build_skb() in the RX path
  net: mvpp2: simplify MVPP2_PRS_RI_* definitions
  net: mvpp2: fix indentation of MVPP2_EXT_GLOBAL_CTRL_DEFAULT
  net: mvpp2: remove unused register definitions
  net: mvpp2: simplify mvpp2_bm_bufs_add()
  net: mvpp2: drop useless fields in mvpp2_bm_pool and related code
  net: mvpp2: remove unused 'tx_skb' field of 'struct mvpp2_tx_queue'
  net: mvpp2: release reference to txq_cpu[] entry after unmapping
  net: mvpp2: handle too large value in mvpp2_rx_time_coal_set()
  net: mvpp2: handle too large value handling in mvpp2_rx_pkts_coal_set()
  net: mvpp2: remove useless arguments in mvpp2_rx_{pkts, time}_coal_set
  ...
commit 3051bf36c2
@@ -2,7 +2,7 @@
 Required properties:

-- compatible: should be "brcm,bcm7445-switch-v4.0"
+- compatible: should be "brcm,bcm7445-switch-v4.0" or "brcm,bcm7278-switch-v4.0"
 - reg: addresses and length of the register sets for the device, must be 6
   pairs of register addresses and lengths
 - interrupts: interrupts for the devices, must be two interrupts
@@ -41,6 +41,13 @@ Optional properties:
 Admission Control Block supports reporting the number of packets in-flight in a
 switch queue

+Port subnodes:
+
+Optional properties:
+
+- brcm,use-bcm-hdr: boolean property, if present, indicates that the switch
+  port has Broadcom tags enabled (per-packet metadata)
+
 Example:

 switch_top@f0b00000 {
@@ -114,6 +121,7 @@ switch_top@f0b00000 {
 			port@0 {
 				label = "gphy";
 				reg = <0>;
+				brcm,use-bcm-hdr;
 			};
 			...
 		};
@@ -1,7 +1,10 @@
 * Broadcom BCM7xxx Ethernet Systemport Controller (SYSTEMPORT)

 Required properties:
-- compatible: should be one of "brcm,systemport-v1.00" or "brcm,systemport"
+- compatible: should be one of:
+  "brcm,systemport-v1.00"
+  "brcm,systemportlite-v1.00" or
+  "brcm,systemport"
 - reg: address and length of the register set for the device.
 - interrupts: interrupts for the device, first cell must be for the rx
   interrupts, and the second cell should be for the transmit queues. An
@@ -0,0 +1,43 @@
+Generic Bluetooth controller over USB (btusb driver)
+---------------------------------------------------
+
+Required properties:
+
+  - compatible : should comply with the format "usbVID,PID" specified in
+		 Documentation/devicetree/bindings/usb/usb-device.txt
+		 At the time of writing, the only OF supported devices
+		 (more may be added later) are:
+
+		 "usb1286,204e" (Marvell 8997)
+
+Also, vendors that use btusb may have additional device properties, e.g.:
+Documentation/devicetree/bindings/net/marvell-bt-8xxx.txt
+
+Optional properties:
+
+  - interrupt-parent: phandle of the parent interrupt controller
+  - interrupt-names: (see below)
+  - interrupts : The interrupt specified by the name "wakeup" is the interrupt
+		 that shall be used for out-of-band wake-on-bt. The driver will
+		 request this interrupt for wakeup. During system suspend, the
+		 irq will be enabled so that the bluetooth chip can wake up the
+		 host platform out of band. During system resume, the irq will
+		 be disabled to make sure unnecessary interrupts are not
+		 received.
+
+Example:
+
+Following example uses irq pin number 3 of gpio0 for out-of-band wake-on-bt:
+
+&usb_host1_ehci {
+	status = "okay";
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	mvl_bt1: bt@1 {
+		compatible = "usb1286,204e";
+		reg = <1>;
+		interrupt-parent = <&gpio0>;
+		interrupt-names = "wakeup";
+		interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+	};
+};
@@ -23,7 +23,6 @@ Required properties:

 Optional properties:
 - ti,hwmods		: Must be "cpgmac0"
-- no_bd_ram		: Must be 0 or 1
 - dual_emac		: Specifies Switch to act as Dual EMAC
 - syscon		: Phandle to the system control device node, which is
 			  the control module device of the am33x
@@ -70,7 +69,6 @@ Examples:
 	cpdma_channels = <8>;
 	ale_entries = <1024>;
 	bd_ram_size = <0x2000>;
-	no_bd_ram = <0>;
 	rx_descs = <64>;
 	mac_control = <0x20>;
 	slaves = <2>;
@@ -99,7 +97,6 @@ Examples:
 	cpdma_channels = <8>;
 	ale_entries = <1024>;
 	bd_ram_size = <0x2000>;
-	no_bd_ram = <0>;
 	rx_descs = <64>;
 	mac_control = <0x20>;
 	slaves = <2>;
@@ -34,13 +34,9 @@ Required properties:

 Each port children node must have the following mandatory properties:
 - reg			: Describes the port address in the switch
-- label			: Describes the label associated with this port, which
-			  will become the netdev name. Special labels are
-			  "cpu" to indicate a CPU port and "dsa" to
-			  indicate an uplink/downlink port between switches in
-			  the cluster.

-A port labelled "dsa" has the following mandatory property:
+An uplink/downlink port between switches in the cluster has the following
+mandatory property:

 - link			: Should be a list of phandles to other switch's DSA
 			  port. This port is used as the outgoing port
@@ -48,12 +44,17 @@ A port labelled "dsa" has the following mandatory property:
 			  information must be given, not just the one hop
 			  routes to neighbouring switches.

-A port labelled "cpu" has the following mandatory property:
+A CPU port has the following mandatory property:

 - ethernet		: Should be a phandle to a valid Ethernet device node.
 			  This host device is what the switch port is
 			  connected to.

+A user port has the following optional property:
+
+- label			: Describes the label associated with this port, which
+			  will become the netdev name.
+
 Port child nodes may also contain the following optional standardised
 properties, described in binding documents:
@@ -107,7 +108,6 @@ linked into one DSA cluster.

 			switch0port5: port@5 {
 				reg = <5>;
-				label = "dsa";
 				phy-mode = "rgmii-txid";
 				link = <&switch1port6
 					&switch2port9>;
@@ -119,7 +119,6 @@ linked into one DSA cluster.

 			port@6 {
 				reg = <6>;
-				label = "cpu";
 				ethernet = <&fec1>;
 				fixed-link {
 					speed = <100>;
@@ -165,7 +164,6 @@ linked into one DSA cluster.

 			switch1port5: port@5 {
 				reg = <5>;
-				label = "dsa";
 				link = <&switch2port9>;
 				phy-mode = "rgmii-txid";
 				fixed-link {
@@ -176,7 +174,6 @@ linked into one DSA cluster.

 			switch1port6: port@6 {
 				reg = <6>;
-				label = "dsa";
 				phy-mode = "rgmii-txid";
 				link = <&switch0port5>;
 				fixed-link {
@@ -255,7 +252,6 @@ linked into one DSA cluster.

 			switch2port9: port@9 {
 				reg = <9>;
-				label = "dsa";
 				phy-mode = "rgmii-txid";
 				link = <&switch1port5
 					&switch0port5>;
@@ -14,9 +14,9 @@ The properties described here are those specific to Marvell devices.
 Additional required and optional properties can be found in dsa.txt.

 Required properties:
-- compatible	       : Should be one of "marvell,mv88e6085" or
-			 "marvell,mv88e6190"
-- reg                  : Address on the MII bus for the switch.
+- compatible		: Should be one of "marvell,mv88e6085" or
+			  "marvell,mv88e6190"
+- reg			: Address on the MII bus for the switch.

 Optional properties:

@@ -26,30 +26,67 @@ Optional properties:
 - interrupt-controller	: Indicates the switch is itself an interrupt
 			  controller. This is used for the PHY interrupts.
 #interrupt-cells = <2>	: Controller uses two cells, number and flag
-- mdio			: container of PHY and devices on the switches MDIO
-			  bus
+- mdio			: Container of PHY and devices on the switch's MDIO
+			  bus.
+- mdio?			: Container of PHYs and devices on the external MDIO
+			  bus. The node must contain a compatible string of
+			  "marvell,mv88e6xxx-mdio-external"

 Example:

-	mdio {
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupt-parent = <&gpio0>;
-		interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
-		interrupt-controller;
-		#interrupt-cells = <2>;
-		switch0: switch@0 {
-			compatible = "marvell,mv88e6085";
-			reg = <0>;
-			reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
-		};
-		mdio {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			switch1phy0: switch1phy0@0 {
-				reg = <0>;
-				interrupt-parent = <&switch0>;
-				interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
-			};
-		};
-	};
+	mdio {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		interrupt-parent = <&gpio0>;
+		interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+
+		switch0: switch@0 {
+			compatible = "marvell,mv88e6085";
+			reg = <0>;
+			reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
+		};
+		mdio {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			switch1phy0: switch1phy0@0 {
+				reg = <0>;
+				interrupt-parent = <&switch0>;
+				interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+			};
+		};
+	};
+
+	mdio {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		interrupt-parent = <&gpio0>;
+		interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+
+		switch0: switch@0 {
+			compatible = "marvell,mv88e6390";
+			reg = <0>;
+			reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
+		};
+		mdio {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			switch1phy0: switch1phy0@0 {
+				reg = <0>;
+				interrupt-parent = <&switch0>;
+				interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+			};
+		};
+
+		mdio1 {
+			compatible = "marvell,mv88e6xxx-mdio-external";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			switch1phy9: switch1phy0@9 {
+				reg = <9>;
+			};
+		};
+	};
@@ -29,6 +29,9 @@ The following properties are common to the Ethernet controllers:
   * "smii"
   * "xgmii"
   * "trgmii"
+  * "2000base-x",
+  * "2500base-x",
+  * "rxaui"
 - phy-connection-type: the same as "phy-mode" property but described in ePAPR;
 - phy-handle: phandle, specifies a reference to a node representing a PHY
   device; this property is described in ePAPR and so preferred;
@ -1,16 +1,21 @@
|
|||
Marvell 8897/8997 (sd8897/sd8997) bluetooth SDIO devices
|
||||
Marvell 8897/8997 (sd8897/sd8997) bluetooth devices (SDIO or USB based)
|
||||
------
|
||||
The 8997 devices supports multiple interfaces. When used on SDIO interfaces,
|
||||
the btmrvl driver is used and when used on USB interface, the btusb driver is
|
||||
used.
|
||||
|
||||
Required properties:
|
||||
|
||||
- compatible : should be one of the following:
|
||||
* "marvell,sd8897-bt"
|
||||
* "marvell,sd8997-bt"
|
||||
* "marvell,sd8897-bt" (for SDIO)
|
||||
* "marvell,sd8997-bt" (for SDIO)
|
||||
* "usb1286,204e" (for USB)
|
||||
|
||||
Optional properties:
|
||||
|
||||
- marvell,cal-data: Calibration data downloaded to the device during
|
||||
initialization. This is an array of 28 values(u8).
|
||||
This is only applicable to SDIO devices.
|
||||
|
||||
- marvell,wakeup-pin: It represents wakeup pin number of the bluetooth chip.
|
||||
firmware will use the pin to wakeup host system (u16).
|
||||
|
@ -18,10 +23,15 @@ Optional properties:
|
|||
platform. The value will be configured to firmware. This
|
||||
is needed to work chip's sleep feature as expected (u16).
|
||||
- interrupt-parent: phandle of the parent interrupt controller
|
||||
- interrupts : interrupt pin number to the cpu. Driver will request an irq based
|
||||
on this interrupt number. During system suspend, the irq will be
|
||||
enabled so that the bluetooth chip can wakeup host platform under
|
||||
certain condition. During system resume, the irq will be disabled
|
||||
- interrupt-names: Used only for USB based devices (See below)
|
||||
- interrupts : specifies the interrupt pin number to the cpu. For SDIO, the
|
||||
driver will use the first interrupt specified in the interrupt
|
||||
array. For USB based devices, the driver will use the interrupt
|
||||
named "wakeup" from the interrupt-names and interrupt arrays.
|
||||
The driver will request an irq based on this interrupt number.
|
||||
During system suspend, the irq will be enabled so that the
|
||||
bluetooth chip can wakeup host platform under certain
|
||||
conditions. During system resume, the irq will be disabled
|
||||
to make sure unnecessary interrupt is not received.
|
||||
|
||||
Example:
|
||||
|
@ -29,7 +39,9 @@ Example:
|
|||
IRQ pin 119 is used as system wakeup source interrupt.
|
||||
wakeup pin 13 and gap 100ms are configured so that firmware can wakeup host
|
||||
using this device side pin and wakeup latency.
|
||||
calibration data is also available in below example.
|
||||
|
||||
Example for SDIO device follows (calibration data is also available in
|
||||
below example).
|
||||
|
||||
&mmc3 {
|
||||
status = "okay";
|
||||
|
@ -54,3 +66,21 @@ calibration data is also available in below example.
|
|||
marvell,wakeup-gap-ms = /bits/ 16 <0x64>;
|
||||
};
|
||||
};
|
||||
|
||||
Example for USB device:
|
||||
|
||||
&usb_host1_ohci {
|
||||
status = "okay";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
mvl_bt1: bt@1 {
|
||||
compatible = "usb1286,204e";
|
||||
reg = <1>;
|
||||
interrupt-parent = <&gpio0>;
|
||||
interrupt-names = "wakeup";
|
||||
interrupts = <119 IRQ_TYPE_LEVEL_LOW>;
|
||||
marvell,wakeup-pin = /bits/ 16 <0x0d>;
|
||||
marvell,wakeup-gap-ms = /bits/ 16 <0x64>;
|
||||
};
|
||||
};
|
|
@@ -25,6 +25,22 @@ Required properties on Meson8b and newer:
 - "clkin0" - first parent clock of the internal mux
 - "clkin1" - second parent clock of the internal mux

+Optional properties on Meson8b and newer:
+- amlogic,tx-delay-ns:	The internal RGMII TX clock delay (provided
+			by this driver) in nanoseconds. Allowed values
+			are: 0ns, 2ns, 4ns, 6ns.
+			When phy-mode is set to "rgmii" then the TX
+			delay should be explicitly configured. When
+			not configured a fallback of 2ns is used.
+			When the phy-mode is set to either "rgmii-id"
+			or "rgmii-txid" the TX clock delay is already
+			provided by the PHY. In that case this
+			property should be set to 0ns (which disables
+			the TX clock delay in the MAC to prevent the
+			clock from going off because both PHY and MAC
+			are adding a delay).
+			Any configuration is ignored when the phy-mode
+			is set to "rmii".
+
 Example for Meson6:
@@ -27,6 +27,14 @@ Optional properties:
 			'vddmac'.
 			Default value is 0%.
 			Ref: Table:1 - Edge rate change (below).
+- vsc8531,led-0-mode	: LED mode. Specify how the LED[0] should behave.
+			  Allowed values are defined in
+			  "include/dt-bindings/net/mscc-phy-vsc8531.h".
+			  Default value is VSC8531_LINK_1000_ACTIVITY (1).
+- vsc8531,led-1-mode	: LED mode. Specify how the LED[1] should behave.
+			  Allowed values are defined in
+			  "include/dt-bindings/net/mscc-phy-vsc8531.h".
+			  Default value is VSC8531_LINK_100_ACTIVITY (2).

 Table: 1 - Edge rate change
 ----------------------------------------------------------------|
@@ -60,4 +68,6 @@ Example:
 	compatible = "ethernet-phy-id0007.0570";
 	vsc8531,vddmac = <3300>;
 	vsc8531,edge-slowdown = <7>;
+	vsc8531,led-0-mode = <LINK_1000_ACTIVITY>;
+	vsc8531,led-1-mode = <LINK_100_ACTIVITY>;
 };
@@ -39,6 +39,10 @@ Optional Properties:
 - enet-phy-lane-swap: If set, indicates the PHY will swap the TX/RX lanes to
   compensate for the board being designed with the lanes swapped.

+- enet-phy-lane-no-swap: If set, indicates that PHY will disable swap of the
+  TX/RX lanes. This property allows the PHY to work correctly after e.g. wrong
+  bootstrap configuration caused by issues in PCB layout design.
+
 - eee-broken-100tx:
 - eee-broken-1000t:
 - eee-broken-10gt:
@@ -6,6 +6,7 @@ Required properties:
 - compatible: should be "rockchip,<name>-gmac"
   "rockchip,rk3228-gmac": found on RK322x SoCs
   "rockchip,rk3288-gmac": found on RK3288 SoCs
+  "rockchip,rk3328-gmac": found on RK3328 SoCs
   "rockchip,rk3366-gmac": found on RK3366 SoCs
   "rockchip,rk3368-gmac": found on RK3368 SoCs
   "rockchip,rk3399-gmac": found on RK3399 SoCs
@@ -1,5 +1,8 @@
 * Synopsys DWC Ethernet QoS IP version 4.10 driver (GMAC)

+This binding is deprecated, but it continues to be supported; new
+features should preferably be added to the stmmac binding document.
+
 This binding supports the Synopsys Designware Ethernet QoS (Quality Of Service)
 IP block. The IP supports multiple options for bus type, clocking and reset
 structure, and feature list. Consequently, a number of properties and list
@@ -49,6 +49,8 @@ Optional properties:
 - snps,force_sf_dma_mode	Force DMA to use the Store and Forward
 				mode for both tx and rx. This flag is
 				ignored if force_thresh_dma_mode is set.
+- snps,en-tx-lpi-clockgating	Enable gating of the MAC TX clock during
+				TX low-power mode
 - snps,multicast-filter-bins:	Number of multicast filter hash bins
 				supported by this device instance
 - snps,perfect-filter-entries:	Number of perfect filter entries supported
@@ -65,7 +67,6 @@ Optional properties:
 - snps,wr_osr_lmt: max write outstanding req. limit
 - snps,rd_osr_lmt: max read outstanding req. limit
 - snps,kbbe: do not cross 1KiB boundary.
-- snps,axi_all: align address
 - snps,blen: this is a vector of supported burst length.
 - snps,fb: fixed-burst
 - snps,mb: mixed-burst
@@ -0,0 +1,24 @@
+Common IEEE 802.11 properties
+
+This provides documentation of common properties that are valid for all wireless
+devices.
+
+Optional properties:
+ - ieee80211-freq-limit : list of supported frequency ranges in kHz. This can be
+	used for devices that in a given config support fewer channels than
+	usual. It may happen that a chipset supports a wide wireless band but is
+	limited to some part of it due to the antennas or power amplifier used.
+	An example case for this can be a tri-band wireless router with two
+	identical chipsets used for two different 5 GHz subbands. Using them
+	incorrectly might not work or might noticeably degrade performance.
+
+Example:
+
+pcie@0,0 {
+	reg = <0x0000 0 0 0 0>;
+	wifi@0,0 {
+		reg = <0x0000 0 0 0 0>;
+		ieee80211-freq-limit = <2402000 2482000>,
+				       <5170000 5250000>;
+	};
+};
@@ -44,6 +44,9 @@ Device registration
 .. kernel-doc:: include/net/cfg80211.h
    :functions: wiphy_new

+.. kernel-doc:: include/net/cfg80211.h
+   :functions: wiphy_read_of_freq_limits
+
 .. kernel-doc:: include/net/cfg80211.h
    :functions: wiphy_register
@@ -64,8 +64,7 @@ USAGE
 When inserting the driver modules the root cell must be specified along with a
 list of volume location server IP addresses:

-	modprobe rxrpc
-	modprobe rxkad
+	modprobe af_rxrpc
 	modprobe kafs rootcell=cambridge.redhat.com:172.16.18.73:172.16.18.91

 The first module is the AF_RXRPC network protocol driver.  This provides the
@@ -214,34 +213,3 @@ If a file is opened with a particular key and then the file descriptor is
 passed to a process that doesn't have that key (perhaps over an AF_UNIX
 socket), then the operations on the file will be made with key that was used to
 open the file.
-
-
-========
-EXAMPLES
-========
-
-Here's what I use to test this.  Some of the names and IP addresses are local
-to my internal DNS.  My "root.afs" partition has a mount point within it for
-some public volumes.
-
-	insmod /tmp/rxrpc.o
-	insmod /tmp/rxkad.o
-	insmod /tmp/kafs.o rootcell=cambridge.redhat.com:172.16.18.91
-
-	mount -t afs \%root.afs. /afs
-	mount -t afs \%cambridge.redhat.com:root.cell. /afs/cambridge.redhat.com/
-
-	echo add grand.central.org 18.9.48.14:128.2.203.61:130.237.48.87 > /proc/fs/afs/cells
-	mount -t afs "#grand.central.org:root.cell." /afs/grand.central.org/
-	mount -t afs "#grand.central.org:root.archive." /afs/grand.central.org/archive
-	mount -t afs "#grand.central.org:root.contrib." /afs/grand.central.org/contrib
-	mount -t afs "#grand.central.org:root.doc." /afs/grand.central.org/doc
-	mount -t afs "#grand.central.org:root.project." /afs/grand.central.org/project
-	mount -t afs "#grand.central.org:root.service." /afs/grand.central.org/service
-	mount -t afs "#grand.central.org:root.software." /afs/grand.central.org/software
-	mount -t afs "#grand.central.org:root.user." /afs/grand.central.org/user
-
-	umount /afs
-	rmmod kafs
-	rmmod rxkad
-	rmmod rxrpc
@@ -295,7 +295,6 @@ DSA currently leverages the following subsystems:

 - MDIO/PHY library: drivers/net/phy/phy.c, mdio_bus.c
 - Switchdev: net/switchdev/*
 - Device Tree for various of_* functions
-- HWMON: drivers/hwmon/*

 MDIO/PHY library
 ----------------
@@ -349,12 +348,6 @@ Documentation/devicetree/bindings/net/dsa/dsa.txt. PHY/MDIO library helper
 functions such as of_get_phy_mode(), of_phy_connect() are also used to query
 per-port PHY specific details: interface connection, MDIO bus location etc..

-HWMON
------
-
-Some switch drivers feature internal temperature sensors which are exposed as
-regular HWMON devices in /sys/class/hwmon/.
-
 Driver development
 ==================

@@ -495,23 +488,6 @@ Power management
   BR_STATE_DISABLED and propagating changes to the hardware if this port is
   disabled while being a bridge member

-Hardware monitoring
--------------------
-
-These callbacks are only available if CONFIG_NET_DSA_HWMON is enabled:
-
-- get_temp: this function queries the given switch for its temperature
-
-- get_temp_limit: this function returns the switch current maximum temperature
-  limit
-
-- set_temp_limit: this function configures the maximum temperature limit
-  allowed
-
-- get_temp_alarm: this function returns the critical temperature threshold
-  returning an alarm notification
-
-See Documentation/hwmon/sysfs-interface for details.
-
 Bridge layer
 ------------
@@ -0,0 +1,135 @@
+The Linux kernel GTP tunneling module
+======================================================================
+Documentation by Harald Welte <laforge@gnumonks.org>
+
+In 'drivers/net/gtp.c' you will find a kernel-level implementation
+of a GTP tunnel endpoint.
+
+== What is GTP ==
+
+GTP is the GPRS Tunneling Protocol, a 3GPP protocol used for
+tunneling User-IP payload between a mobile station (phone, modem)
+and the gateway interconnecting with an external packet data network
+(such as the internet).
+
+So when you start a 'data connection' from your mobile phone, the
+phone will use the control plane to signal for the establishment of
+such a tunnel between that external data network and the phone.  The
+tunnel endpoints thus reside on the phone and in the gateway.  All
+intermediate nodes just transport the encapsulated packet.
+
+The phone itself does not implement GTP but uses some other
+technology-dependent protocol stack for transmitting the user IP
+payload, such as LLC/SNDCP/RLC/MAC.
+
+At some network element inside the cellular operator infrastructure
+(SGSN in case of GPRS/EGPRS or classic UMTS, hNodeB in case of a 3G
+femtocell, eNodeB in case of 4G/LTE), the cellular protocol stacking
+is translated into GTP *without breaking the end-to-end tunnel*.  So
+intermediate nodes just perform some specific relay function.
+
+At some point the GTP packet ends up on the so-called GGSN (GSM/UMTS)
+or P-GW (LTE), which terminates the tunnel, decapsulates the packet
+and forwards it onto an external packet data network.  This can be
+public internet, but can also be any private IP network (or even
+theoretically some non-IP network like X.25).
+
+You can find the protocol specification in 3GPP TS 29.060, available
+publicly via the 3GPP website at http://www.3gpp.org/DynaReport/29060.htm
+
+A direct PDF link to v13.6.0 is provided for convenience below:
+http://www.etsi.org/deliver/etsi_ts/129000_129099/129060/13.06.00_60/ts_129060v130600p.pdf
+
+== The Linux GTP tunnelling module ==
+
+The module implements the function of a tunnel endpoint, i.e. it is
+able to decapsulate tunneled IP packets in the uplink originated by
+the phone, and encapsulate raw IP packets received from the external
+packet network in downlink towards the phone.
+
+It *only* implements the so-called 'user plane', carrying the User-IP
+payload, called GTP-U.  It does not implement the 'control plane',
+which is a signaling protocol used for establishment and teardown of
+GTP tunnels (GTP-C).
+
+So in order to have a working GGSN/P-GW setup, you will need a
+userspace program that implements the GTP-C protocol and which then
+uses the netlink interface provided by the GTP-U module in the kernel
+to configure the kernel module.
+
+This split architecture follows the tunneling modules of other
+protocols, e.g. PPPoE or L2TP, where you also run a userspace daemon
+to handle the tunnel establishment, authentication etc. and only the
+data plane is accelerated inside the kernel.
+
+Don't be confused by terminology:  The GTP User Plane goes through
+the kernel-accelerated path, while the GTP Control Plane goes to
+Userspace :)
+
+The official homepage of the module is at
+https://osmocom.org/projects/linux-kernel-gtp-u/wiki
+
+== Userspace Programs with Linux Kernel GTP-U support ==
+
+At the time of this writing, there are at least two Free Software
+implementations that implement GTP-C and can use the netlink interface
+to make use of the Linux kernel GTP-U support:
+
+* OpenGGSN (classic 2G/3G GGSN in C):
+  https://osmocom.org/projects/openggsn/wiki/OpenGGSN
+
+* ergw (GGSN + P-GW in Erlang):
+  https://github.com/travelping/ergw
+
+== Userspace Library / Command Line Utilities ==
+
+There is a userspace library called 'libgtpnl' which is based on
+libmnl and which implements a C-language API towards the netlink
+interface provided by the Kernel GTP module:
+
+http://git.osmocom.org/libgtpnl/
+
+== Protocol Versions ==
+
+There are two different versions of GTP-U: v0 and v1.  Both are
+implemented in the Kernel GTP module.  Version 0 is a legacy version,
+and deprecated from recent 3GPP specifications.
+
+There are three versions of GTP-C: v0, v1, and v2.  As the kernel
+doesn't implement GTP-C, we don't have to worry about this.  It's the
+responsibility of the control plane implementation in userspace to
+implement that.
+
+== IPv6 ==
+
+The 3GPP specifications indicate either IPv4 or IPv6 can be used both
+on the inner (user) IP layer, or on the outer (transport) layer.
+
+Unfortunately, the Kernel module currently supports IPv6 neither for
+the User IP payload, nor for the outer IP layer.  Patches or other
+contributions to fix this are most welcome!
+
+== Mailing List ==
+
+If you have questions regarding how to use the Kernel GTP module from
+your own software, or want to contribute to the code, please use the
+osmocom-net-gprs mailing list for related discussion. The list can be
+reached at osmocom-net-gprs@lists.osmocom.org and the mailman
+interface for managing your subscription is at
+https://lists.osmocom.org/mailman/listinfo/osmocom-net-gprs
+
+== Issue Tracker ==
+
+The Osmocom project maintains an issue tracker for the Kernel GTP-U
+module at
+https://osmocom.org/projects/linux-kernel-gtp-u/issues
+
+== History / Acknowledgements ==
+
+The Module was originally created in 2012 by Harald Welte, but never
+completed.  Pablo came in to finish the mess Harald left behind.  But
+due to a lack of user interest, it never got merged.
+
+In 2015, Andreas Schultz came to the rescue and fixed lots more bugs,
+extended it with new features and finally pushed all of us to get it
+mainline, where it was merged in 4.7.0.
@@ -246,21 +246,12 @@ tcp_dsack - BOOLEAN
 	Allows TCP to send "duplicate" SACKs.

 tcp_early_retrans - INTEGER
-	Enable Early Retransmit (ER), per RFC 5827. ER lowers the threshold
-	for triggering fast retransmit when the amount of outstanding data is
-	small and when no previously unsent data can be transmitted (such
-	that limited transmit could be used). Also controls the use of
-	Tail loss probe (TLP) that converts RTOs occurring due to tail
-	losses into fast recovery (draft-dukkipati-tcpm-tcp-loss-probe-01).
+	Tail loss probe (TLP) converts RTOs occurring due to tail
+	losses into fast recovery (draft-ietf-tcpm-rack). Note that
+	TLP requires RACK to function properly (see tcp_recovery below)
 	Possible values:
-		0 disables ER
-		1 enables ER
-		2 enables ER but delays fast recovery and fast retransmit
-		  by a fourth of RTT. This mitigates connection falsely
-		  recovers when network has a small degree of reordering
-		  (less than 3 packets).
-		3 enables delayed ER and TLP.
-		4 enables TLP only.
+		0 disables TLP
+		3 or 4 enables TLP
 	Default: 3

 tcp_ecn - INTEGER
@@ -712,18 +703,6 @@ tcp_thin_linear_timeouts - BOOLEAN
 	Documentation/networking/tcp-thin.txt
 	Default: 0

-tcp_thin_dupack - BOOLEAN
-	Enable dynamic triggering of retransmissions after one dupACK
-	for thin streams. If set, a check is performed upon reception
-	of a dupACK to determine if the stream is thin (less than 4
-	packets in flight). As long as the stream is found to be thin,
-	data is retransmitted on the first received dupACK. This
-	improves retransmission latency for non-aggressive thin
-	streams, often found to be time-dependent.
-	For more information on thin streams, see
-	Documentation/networking/tcp-thin.txt
-	Default: 0
-
 tcp_limit_output_bytes - INTEGER
 	Controls TCP Small Queue limit per tcp socket.
 	TCP bulk sender tends to increase packets in flight until it
@@ -742,6 +721,13 @@ tcp_challenge_ack_limit - INTEGER

 UDP variables:

+udp_l3mdev_accept - BOOLEAN
+	Enabling this option allows a "global" bound socket to work
+	across L3 master domains (e.g., VRFs) with packets capable of
+	being received regardless of the L3 domain in which they
+	originated. Only valid when the kernel was compiled with
+	CONFIG_NET_L3_MASTER_DEV.
+
 udp_mem - vector of 3 INTEGERs: min, pressure, max
 	Number of pages allowed for queueing by all UDP sockets.

@@ -843,6 +829,15 @@ ip_local_reserved_ports - list of comma separated ranges

 	Default: Empty

+ip_unprivileged_port_start - INTEGER
+	This is a per-namespace sysctl.  It defines the first
+	unprivileged port in the network namespace.  Privileged ports
+	require root or CAP_NET_BIND_SERVICE in order to bind to them.
+	To disable all privileged ports, set this to 0.  It may not
+	overlap with the ip_local_reserved_ports range.
+
+	Default: 1024
+
 ip_nonlocal_bind - BOOLEAN
 	If set, allows processes to bind() to non-local IP addresses,
 	which can be quite useful - but may break some applications.
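[Editor's note] To make the ip_unprivileged_port_start semantics added above concrete, here is a minimal illustrative C sketch (our own example, not from the kernel tree; the port choices are arbitrary) showing how the sysctl changes the outcome of bind() for an unprivileged process:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <arpa/inet.h>
#include <sys/socket.h>

/* Try to bind to `port`. With the default
 * net.ipv4.ip_unprivileged_port_start = 1024, an unprivileged process
 * gets EACCES for ports below 1024; lowering the sysctl (e.g. to 80)
 * makes the same bind() succeed without CAP_NET_BIND_SERVICE. */
static int try_bind(unsigned short port)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in addr;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	addr.sin_port = htons(port);

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		printf("port %u: bind failed: %s\n", port, strerror(errno));
		return -1;
	}
	printf("port %u: bind succeeded\n", port);
	return fd;
}

int main(void)
{
	try_bind(80);	/* privileged unless the sysctl was lowered */
	try_bind(8080);	/* always unprivileged */
	return 0;
}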
@@ -0,0 +1,10 @@
+/proc/sys/net/netfilter/* Variables:
+
+nf_log_all_netns - BOOLEAN
+	0 - disabled (default)
+	not 0 - enabled
+
+	By default, only init_net namespace can log packets into kernel log
+	with LOG target; this aims to prevent containers from flooding host
+	kernel log. If enabled, this target also works in other network
+	namespaces. This variable is only accessible from init_net.
@@ -565,7 +565,7 @@ TPACKET_V1 --> TPACKET_V2:
 	(void *)hdr + TPACKET_ALIGN(sizeof(struct tpacket_hdr))

 TPACKET_V2 --> TPACKET_V3:
-	- Flexible buffer implementation:
+	- Flexible buffer implementation for RX_RING:
 		1. Blocks can be configured with non-static frame-size
 		2. Read/poll is at a block-level (as opposed to packet-level)
 		3. Added poll timeout to avoid indefinite user-space wait
@@ -574,7 +574,12 @@ TPACKET_V2 --> TPACKET_V3:
 		4.1 block::timeout
 		4.2 tpkt_hdr::sk_rxhash
 	- RX Hash data available in user space
-	- Currently only RX_RING available
+	- TX_RING semantics are conceptually similar to TPACKET_V2;
+	  use tpacket3_hdr instead of tpacket2_hdr, and TPACKET3_HDRLEN
+	  instead of TPACKET2_HDRLEN. In the current implementation,
+	  the tp_next_offset field in the tpacket3_hdr MUST be set to
+	  zero, indicating that the ring does not hold variable sized frames.
+	  Packets with non-zero values of tp_next_offset will be dropped.

 -------------------------------------------------------------------------------
 + AF_PACKET fanout mode
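[Editor's note] As a concrete illustration of the TPACKET_V3 TX_RING semantics described in the hunk above, here is a minimal userspace sketch (our own illustrative code, not from the kernel tree; the ring geometry is arbitrary) that sets up a V3 transmit ring and zeroes tp_next_offset as required:

#include <string.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

int setup_tpacket_v3_tx(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	int version = TPACKET_V3;
	struct tpacket_req3 req;
	void *ring;

	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version, sizeof(version));

	memset(&req, 0, sizeof(req));
	req.tp_block_size = 1 << 12;	/* 4 KiB blocks */
	req.tp_frame_size = 1 << 11;	/* fixed-size frames: V3 TX holds no variable sized frames */
	req.tp_block_nr   = 64;
	req.tp_frame_nr   = (req.tp_block_size / req.tp_frame_size) * req.tp_block_nr;
	setsockopt(fd, SOL_PACKET, PACKET_TX_RING, &req, sizeof(req));

	ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	/* Fill the first frame slot, mirroring the TPACKET_V2 flow but
	 * with the V3 header: tp_next_offset MUST be zero or the kernel
	 * drops the frame. */
	struct tpacket3_hdr *hdr = ring;
	memset(hdr, 0, TPACKET3_HDRLEN);
	hdr->tp_next_offset = 0;
	/* ...copy the frame data just past the header area (as with
	 * TPACKET_V2, but using TPACKET3_HDRLEN), set hdr->tp_len, then
	 * hdr->tp_status = TP_STATUS_SEND_REQUEST and send(fd, NULL, 0, 0). */
	return fd;
}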
@@ -156,12 +156,12 @@ struct ieee80211_regdomain mydriver_jp_regdom = {
 	//.alpha2 =  "99", /* If I have no alpha2 to map it to */
 	.reg_rules = {
 		/* IEEE 802.11b/g, channels 1..14 */
-		REG_RULE(2412-20, 2484+20, 40, 6, 20, 0),
+		REG_RULE(2412-10, 2484+10, 40, 6, 20, 0),
 		/* IEEE 802.11a, channels 34..48 */
-		REG_RULE(5170-20, 5240+20, 40, 6, 20,
+		REG_RULE(5170-10, 5240+10, 40, 6, 20,
 			NL80211_RRF_NO_IR),
 		/* IEEE 802.11a, channels 52..64 */
-		REG_RULE(5260-20, 5320+20, 40, 6, 20,
+		REG_RULE(5260-10, 5320+10, 40, 6, 20,
 			NL80211_RRF_NO_IR|
 			NL80211_RRF_DFS),
 	}
@@ -205,7 +205,7 @@ the data in regdb.c as an alternative to using CRDA.
 The file net/wireless/db.txt should be kept up-to-date with the db.txt
 file available in the git repository here:

-    git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-regdb.git
+    git://git.kernel.org/pub/scm/linux/kernel/git/sforshee/wireless-regdb.git

 Again, most users in most situations should be using the CRDA package
 provided with their distribution, and in most other situations users
@ -98,10 +98,11 @@ VRF device:
|
|||
|
||||
or to specify the output device using cmsg and IP_PKTINFO.
|
||||
|
||||
TCP services running in the default VRF context (ie., not bound to any VRF
|
||||
device) can work across all VRF domains by enabling the tcp_l3mdev_accept
|
||||
sysctl option:
|
||||
TCP & UDP services running in the default VRF context (ie., not bound
|
||||
to any VRF device) can work across all VRF domains by enabling the
|
||||
tcp_l3mdev_accept and udp_l3mdev_accept sysctl options:
|
||||
sysctl -w net.ipv4.tcp_l3mdev_accept=1
|
||||
sysctl -w net.ipv4.udp_l3mdev_accept=1
|
||||
|
||||
netfilter rules on the VRF device can be used to limit access to services
|
||||
running in the default VRF context as well.
|
||||
|
|
|
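[Editor's note] For the opposite arrangement — a service confined to a single VRF rather than a global one spanning all of them — a socket can be bound to the VRF device with SO_BINDTODEVICE. A minimal illustrative sketch (our own example; the device name "vrf-blue" is an assumption):

#include <string.h>
#include <sys/socket.h>

/* Bind a socket to one VRF so it only serves that L3 domain. This
 * complements the tcp_l3mdev_accept/udp_l3mdev_accept sysctls above,
 * which instead let one *global* socket work across all VRFs. */
static int bind_to_vrf(int fd, const char *vrf)	/* e.g. "vrf-blue" */
{
	return setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
			  vrf, strlen(vrf) + 1);
}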
@@ -0,0 +1,175 @@
+SipHash - a short input PRF
+-----------------------------------------------
+Written by Jason A. Donenfeld <jason@zx2c4.com>
+
+SipHash is a cryptographically secure PRF -- a keyed hash function -- that
+performs very well for short inputs, hence the name. It was designed by
+cryptographers Daniel J. Bernstein and Jean-Philippe Aumasson. It is intended
+as a replacement for some uses of: `jhash`, `md5_transform`, `sha_transform`,
+and so forth.
+
+SipHash takes a secret key filled with randomly generated numbers and either
+an input buffer or several input integers. It spits out an integer that is
+indistinguishable from random. You may then use that integer as part of secure
+sequence numbers, secure cookies, or mask it off for use in a hash table.
+
+1. Generating a key
+
+Keys should always be generated from a cryptographically secure source of
+random numbers, either using get_random_bytes or get_random_once:
+
+siphash_key_t key;
+get_random_bytes(&key, sizeof(key));
+
+If you're not deriving your key from here, you're doing it wrong.
+
+2. Using the functions
+
+There are two variants of the function, one that takes a list of integers, and
+one that takes a buffer:
+
+u64 siphash(const void *data, size_t len, const siphash_key_t *key);
+
+And:
+
+u64 siphash_1u64(u64, const siphash_key_t *key);
+u64 siphash_2u64(u64, u64, const siphash_key_t *key);
+u64 siphash_3u64(u64, u64, u64, const siphash_key_t *key);
+u64 siphash_4u64(u64, u64, u64, u64, const siphash_key_t *key);
+u64 siphash_1u32(u32, const siphash_key_t *key);
+u64 siphash_2u32(u32, u32, const siphash_key_t *key);
+u64 siphash_3u32(u32, u32, u32, const siphash_key_t *key);
+u64 siphash_4u32(u32, u32, u32, u32, const siphash_key_t *key);
+
+If you pass the generic siphash function something of a constant length, it
+will constant fold at compile-time and automatically choose one of the
+optimized functions.
+
+3. Hashtable key function usage:
+
+struct some_hashtable {
+	DECLARE_HASHTABLE(hashtable, 8);
+	siphash_key_t key;
+};
+
+void init_hashtable(struct some_hashtable *table)
+{
+	get_random_bytes(&table->key, sizeof(table->key));
+}
+
+static inline struct hlist_head *some_hashtable_bucket(struct some_hashtable *table, struct interesting_input *input)
+{
+	return &table->hashtable[siphash(input, sizeof(*input), &table->key) & (HASH_SIZE(table->hashtable) - 1)];
+}
+
+You may then iterate like usual over the returned hash bucket.
+
+4. Security
+
+SipHash has a very high security margin, with its 128-bit key. So long as the
+key is kept secret, it is impossible for an attacker to guess the outputs of
+the function, even if being able to observe many outputs, since 2^128 outputs
+is significant.
+
+Linux implements the "2-4" variant of SipHash.
+
+5. Struct-passing Pitfalls
+
+Often times the XuY functions will not be large enough, and instead you'll
+want to pass a pre-filled struct to siphash. When doing this, it's important
+to always ensure the struct has no padding holes. The easiest way to do this
+is to simply arrange the members of the struct in descending order of size,
+and to use offsetofend() instead of sizeof() for getting the size. For
+performance reasons, if possible, it's probably a good thing to align the
+struct to the right boundary. Here's an example:
+
+const struct {
+	struct in6_addr saddr;
+	u32 counter;
+	u16 dport;
+} __aligned(SIPHASH_ALIGNMENT) combined = {
+	.saddr = *(struct in6_addr *)saddr,
+	.counter = counter,
+	.dport = dport
+};
+u64 h = siphash(&combined, offsetofend(typeof(combined), dport), &secret);
+
+6. Resources
+
+Read the SipHash paper if you're interested in learning more:
+https://131002.net/siphash/siphash.pdf
+
+~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
+
+HalfSipHash - SipHash's insecure younger cousin
+-----------------------------------------------
+Written by Jason A. Donenfeld <jason@zx2c4.com>
+
+On the off-chance that SipHash is not fast enough for your needs, you might be
+able to justify using HalfSipHash, a terrifying but potentially useful
+possibility. HalfSipHash cuts SipHash's rounds down from "2-4" to "1-3" and,
+even scarier, uses an easily brute-forceable 64-bit key (with a 32-bit output)
+instead of SipHash's 128-bit key. However, this may appeal to some
+high-performance `jhash` users.
+
+Danger!
+
+Do not ever use HalfSipHash except for as a hashtable key function, and only
+then when you can be absolutely certain that the outputs will never be
+transmitted out of the kernel. This is only remotely useful over `jhash` as a
+means of mitigating hashtable flooding denial of service attacks.
+
+1. Generating a key
+
+Keys should always be generated from a cryptographically secure source of
+random numbers, either using get_random_bytes or get_random_once:
+
+hsiphash_key_t key;
+get_random_bytes(&key, sizeof(key));
+
+If you're not deriving your key from here, you're doing it wrong.
+
+2. Using the functions
+
+There are two variants of the function, one that takes a list of integers, and
+one that takes a buffer:
+
+u32 hsiphash(const void *data, size_t len, const hsiphash_key_t *key);
+
+And:
+
+u32 hsiphash_1u32(u32, const hsiphash_key_t *key);
+u32 hsiphash_2u32(u32, u32, const hsiphash_key_t *key);
+u32 hsiphash_3u32(u32, u32, u32, const hsiphash_key_t *key);
+u32 hsiphash_4u32(u32, u32, u32, u32, const hsiphash_key_t *key);
+
+If you pass the generic hsiphash function something of a constant length, it
+will constant fold at compile-time and automatically choose one of the
+optimized functions.
+
+3. Hashtable key function usage:
+
+struct some_hashtable {
+	DECLARE_HASHTABLE(hashtable, 8);
+	hsiphash_key_t key;
+};
+
+void init_hashtable(struct some_hashtable *table)
+{
+	get_random_bytes(&table->key, sizeof(table->key));
+}
+
+static inline struct hlist_head *some_hashtable_bucket(struct some_hashtable *table, struct interesting_input *input)
+{
+	return &table->hashtable[hsiphash(input, sizeof(*input), &table->key) & (HASH_SIZE(table->hashtable) - 1)];
+}
+
+You may then iterate like usual over the returned hash bucket.
+
+4. Performance
+
+HalfSipHash is roughly 3 times slower than JenkinsHash. For many replacements,
+this will not be a problem, as the hashtable lookup isn't the bottleneck. And
+in general, this is probably a good sacrifice to make for the security and DoS
+resistance of HalfSipHash.
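[Editor's note] The pull request highlights SipHash's use for secure sequence numbers; a compressed illustrative sketch of that pattern (our own example, not the kernel's actual net/core/secure_seq.c implementation) looks like:

/* Illustrative only: derive a hard-to-guess 32-bit initial sequence
 * number from the connection 4-tuple, in the style the new document
 * above describes. */
static siphash_key_t isn_key __read_mostly;	/* seeded once with get_random_bytes() */

static u32 example_secure_isn(__be32 saddr, __be32 daddr,
			      __be16 sport, __be16 dport)
{
	u64 hash = siphash_3u32((__force u32)saddr, (__force u32)daddr,
				(__force u32)sport << 16 | (__force u32)dport,
				&isn_key);
	return (u32)hash;
}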
@@ -54,6 +54,18 @@ Values :
 	1 - enable JIT hardening for unprivileged users only
 	2 - enable JIT hardening for all users

+bpf_jit_kallsyms
+----------------
+
+When Berkeley Packet Filter Just in Time compiler is enabled, then compiled
+images are unknown addresses to the kernel, meaning they neither show up in
+traces nor in /proc/kallsyms. This enables export of these addresses, which
+can be used for debugging/tracing. If bpf_jit_harden is enabled, this feature
+is disabled.
+Values :
+	0 - disable JIT kallsyms export (default value)
+	1 - enable JIT kallsyms export for privileged users only
+
 dev_weight
 --------------

@@ -61,6 +73,27 @@ The maximum number of packets that kernel can handle on a NAPI interrupt,
 it's a Per-CPU variable.
 Default: 64

+dev_weight_rx_bias
+--------------
+
+RPS (e.g. RFS, aRFS) processing is competing with the registered NAPI poll function
+of the driver for the per softirq cycle netdev_budget. This parameter influences
+the proportion of the configured netdev_budget that is spent on RPS based packet
+processing during RX softirq cycles. It is further meant for making current
+dev_weight adaptable for asymmetric CPU needs on RX/TX side of the network stack.
+(see dev_weight_tx_bias) It is effective on a per CPU basis. Determination is based
+on dev_weight and is calculated multiplicatively (dev_weight * dev_weight_rx_bias).
+Default: 1
+
+dev_weight_tx_bias
+--------------
+
+Scales the maximum number of packets that can be processed during a TX softirq cycle.
+Effective on a per CPU basis. Allows scaling of current dev_weight for asymmetric
+net stack processing needs. Be careful to avoid making TX softirq processing a CPU hog.
+Calculation is based on dev_weight (dev_weight * dev_weight_tx_bias).
+Default: 1
+
 default_qdisc
 --------------
MAINTAINERS (63 lines changed)
@@ -2607,6 +2607,12 @@ L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/broadcom/bnx2x/

+BROADCOM BNXT_EN 50 GIGABIT ETHERNET DRIVER
+M:	Michael Chan <michael.chan@broadcom.com>
+L:	netdev@vger.kernel.org
+S:	Supported
+F:	drivers/net/ethernet/broadcom/bnxt/
+
 BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
 M:	Florian Fainelli <f.fainelli@gmail.com>
 M:	Ray Jui <rjui@broadcom.com>
@@ -5647,6 +5653,14 @@ T:	git git://linuxtv.org/media_tree.git
 S:	Odd Fixes
 F:	drivers/media/usb/gspca/

+GTP (GPRS Tunneling Protocol)
+M:	Pablo Neira Ayuso <pablo@netfilter.org>
+M:	Harald Welte <laforge@gnumonks.org>
+L:	osmocom-net-gprs@lists.osmocom.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/gtp.git
+S:	Maintained
+F:	drivers/net/gtp.c
+
 GUID PARTITION TABLE (GPT)
 M:	Davidlohr Bueso <dave@stgolabs.net>
 L:	linux-efi@vger.kernel.org
@@ -6249,6 +6263,13 @@ F:	include/net/cfg802154.h
 F:	include/net/ieee802154_netdev.h
 F:	Documentation/networking/ieee802154.txt

+IFE PROTOCOL
+M:	Yotam Gigi <yotamg@mellanox.com>
+M:	Jamal Hadi Salim <jhs@mojatatu.com>
+F:	net/ife
+F:	include/net/ife.h
+F:	include/uapi/linux/ife.h
+
 IGORPLUG-USB IR RECEIVER
 M:	Sean Young <sean@mess.org>
 L:	linux-media@vger.kernel.org
@@ -8577,9 +8598,8 @@ F:	Documentation/networking/s2io.txt
 F:	Documentation/networking/vxge.txt
 F:	drivers/net/ethernet/neterion/

-NETFILTER ({IP,IP6,ARP,EB,NF}TABLES)
+NETFILTER
 M:	Pablo Neira Ayuso <pablo@netfilter.org>
-M:	Patrick McHardy <kaber@trash.net>
 M:	Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
 L:	netfilter-devel@vger.kernel.org
 L:	coreteam@netfilter.org
@@ -9396,6 +9416,14 @@ F:	drivers/video/fbdev/sti*
 F:	drivers/video/console/sti*
 F:	drivers/video/logo/logo_parisc*

+PARMAN
+M:	Jiri Pirko <jiri@mellanox.com>
+L:	netdev@vger.kernel.org
+S:	Supported
+F:	lib/parman.c
+F:	lib/test_parman.c
+F:	include/linux/parman.h
+
 PC87360 HARDWARE MONITORING DRIVER
 M:	Jim Cromie <jim.cromie@gmail.com>
 L:	linux-hwmon@vger.kernel.org
@@ -9994,6 +10022,13 @@ L:	linuxppc-dev@lists.ozlabs.org
 S:	Maintained
 F:	drivers/block/ps3vram.c

+PSAMPLE PACKET SAMPLING SUPPORT:
+M:	Yotam Gigi <yotamg@mellanox.com>
+S:	Maintained
+F:	net/psample
+F:	include/net/psample.h
+F:	include/uapi/linux/psample.h
+
 PSTORE FILESYSTEM
 M:	Kees Cook <keescook@chromium.org>
 M:	Anton Vorontsov <anton@enomsg.org>
@@ -10623,7 +10658,7 @@ F:	drivers/net/wireless/realtek/rtlwifi/
 F:	drivers/net/wireless/realtek/rtlwifi/rtl8192ce/

 RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
-M:	Jes Sorensen <Jes.Sorensen@redhat.com>
+M:	Jes Sorensen <Jes.Sorensen@gmail.com>
 L:	linux-wireless@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8xxxu-devel
 S:	Maintained
@@ -10889,6 +10924,13 @@ S:	Maintained
 F:	drivers/staging/media/st-cec/
 F:	Documentation/devicetree/bindings/media/stih-cec.txt

+SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
+M:	Ursula Braun <ubraun@linux.vnet.ibm.com>
+L:	linux-s390@vger.kernel.org
+W:	http://www.ibm.com/developerworks/linux/linux390/
+S:	Supported
+F:	net/smc/
+
 SYNOPSYS DESIGNWARE DMAC DRIVER
 M:	Viresh Kumar <vireshk@kernel.org>
 M:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
@@ -10897,13 +10939,6 @@ F:	include/linux/dma/dw.h
 F:	include/linux/platform_data/dma-dw.h
 F:	drivers/dma/dw/

-SYNOPSYS DESIGNWARE ETHERNET QOS 4.10a driver
-M:	Lars Persson <lars.persson@axis.com>
-L:	netdev@vger.kernel.org
-S:	Supported
-F:	Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
-F:	drivers/net/ethernet/synopsys/dwc_eth_qos.c
-
 SYNOPSYS DESIGNWARE I2C DRIVER
 M:	Jarkko Nikula <jarkko.nikula@linux.intel.com>
 R:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
@@ -11356,6 +11391,13 @@ F:	arch/arm/mach-s3c24xx/mach-bast.c
 F:	arch/arm/mach-s3c24xx/bast-ide.c
 F:	arch/arm/mach-s3c24xx/bast-irq.c

+SIPHASH PRF ROUTINES
+M:	Jason A. Donenfeld <Jason@zx2c4.com>
+S:	Maintained
+F:	lib/siphash.c
+F:	lib/test_siphash.c
+F:	include/linux/siphash.h
+
 TI DAVINCI MACHINE SUPPORT
 M:	Sekhar Nori <nsekhar@ti.com>
 M:	Kevin Hilman <khilman@kernel.org>
@@ -11927,6 +11969,7 @@ F:	include/linux/swiotlb.h

 SWITCHDEV
 M:	Jiri Pirko <jiri@resnulli.us>
+M:	Ivan Vecera <ivecera@redhat.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	net/switchdev/
@@ -222,6 +222,10 @@ config GENERIC_SMP_IDLE_THREAD
 config GENERIC_IDLE_POLL_SETUP
 	bool

+# Select if arch has all set_memory_ro/rw/x/nx() functions in asm/cacheflush.h
+config ARCH_HAS_SET_MEMORY
+	bool
+
 # Select if arch init_task initializer is different to init/init_task.c
 config ARCH_INIT_TASK
 	bool
@@ -837,4 +841,7 @@ config STRICT_MODULE_RWX
 	  and non-text memory will be made non-executable. This provides
 	  protection against certain security exploits (e.g. writing to text)

+config ARCH_WANT_RELAX_ORDER
+	bool
+
 source "kernel/gcov/Kconfig"
@@ -4,6 +4,7 @@ config ARM
 	select ARCH_CLOCKSOURCE_DATA
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_ELF_RANDOMIZE
+	select ARCH_HAS_SET_MEMORY
 	select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
 	select ARCH_HAS_STRICT_MODULE_RWX if MMU
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
@ -782,7 +782,6 @@
|
|||
cpdma_channels = <8>;
|
||||
ale_entries = <1024>;
|
||||
bd_ram_size = <0x2000>;
|
||||
no_bd_ram = <0>;
|
||||
mac_control = <0x20>;
|
||||
slaves = <2>;
|
||||
active_slave = <0>;
|
||||
|
|
|
@@ -670,7 +670,6 @@
 	cpdma_channels = <8>;
 	ale_entries = <1024>;
 	bd_ram_size = <0x2000>;
-	no_bd_ram = <0>;
 	mac_control = <0x20>;
 	slaves = <2>;
 	active_slave = <0>;
@@ -510,7 +510,6 @@
 	cpdma_channels = <8>;
 	ale_entries = <1024>;
 	bd_ram_size = <0x2000>;
-	no_bd_ram = <0>;
 	mac_control = <0x20>;
 	slaves = <2>;
 	active_slave = <0>;
@@ -1709,7 +1709,6 @@
 	cpdma_channels = <8>;
 	ale_entries = <1024>;
 	bd_ram_size = <0x2000>;
-	no_bd_ram = <0>;
 	mac_control = <0x20>;
 	slaves = <2>;
 	active_slave = <0>;
@@ -253,7 +253,8 @@ CONFIG_R8169=y
 CONFIG_SH_ETH=y
 CONFIG_SMSC911X=y
 CONFIG_STMMAC_ETH=y
-CONFIG_SYNOPSYS_DWC_ETH_QOS=y
+CONFIG_STMMAC_PLATFORM=y
+CONFIG_DWMAC_DWC_QOS_ETH=y
 CONFIG_TI_CPSW=y
 CONFIG_XILINX_EMACLITE=y
 CONFIG_AT803X_PHY=y
@@ -105,7 +105,7 @@ void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data)
 /*****************************************************************************
  * Ethernet switch
  ****************************************************************************/
-void __init orion5x_eth_switch_init(struct dsa_platform_data *d)
+void __init orion5x_eth_switch_init(struct dsa_chip_data *d)
 {
 	orion_ge00_switch_init(d);
 }
@@ -3,7 +3,7 @@
 
 #include <linux/reboot.h>
 
-struct dsa_platform_data;
+struct dsa_chip_data;
 struct mv643xx_eth_platform_data;
 struct mv_sata_platform_data;
 
@@ -41,7 +41,7 @@ void orion5x_setup_wins(void);
 void orion5x_ehci0_init(void);
 void orion5x_ehci1_init(void);
 void orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data);
-void orion5x_eth_switch_init(struct dsa_platform_data *d);
+void orion5x_eth_switch_init(struct dsa_chip_data *d);
 void orion5x_i2c_init(void);
 void orion5x_sata_init(struct mv_sata_platform_data *sata_data);
 void orion5x_spi_init(void);

@@ -101,11 +101,6 @@ static struct dsa_chip_data rd88f5181l_fxo_switch_chip_data = {
     .port_names[7] = "lan3",
 };
 
-static struct dsa_platform_data __initdata rd88f5181l_fxo_switch_plat_data = {
-    .nr_chips = 1,
-    .chip = &rd88f5181l_fxo_switch_chip_data,
-};
-
 static void __init rd88f5181l_fxo_init(void)
 {
     /*
@@ -120,7 +115,7 @@ static void __init rd88f5181l_fxo_init(void)
      */
     orion5x_ehci0_init();
     orion5x_eth_init(&rd88f5181l_fxo_eth_data);
-    orion5x_eth_switch_init(&rd88f5181l_fxo_switch_plat_data);
+    orion5x_eth_switch_init(&rd88f5181l_fxo_switch_chip_data);
     orion5x_uart0_init();
 
     mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,

@@ -102,11 +102,6 @@ static struct dsa_chip_data rd88f5181l_ge_switch_chip_data = {
     .port_names[7] = "lan3",
 };
 
-static struct dsa_platform_data __initdata rd88f5181l_ge_switch_plat_data = {
-    .nr_chips = 1,
-    .chip = &rd88f5181l_ge_switch_chip_data,
-};
-
 static struct i2c_board_info __initdata rd88f5181l_ge_i2c_rtc = {
     I2C_BOARD_INFO("ds1338", 0x68),
 };
@@ -125,7 +120,7 @@ static void __init rd88f5181l_ge_init(void)
      */
     orion5x_ehci0_init();
     orion5x_eth_init(&rd88f5181l_ge_eth_data);
-    orion5x_eth_switch_init(&rd88f5181l_ge_switch_plat_data);
+    orion5x_eth_switch_init(&rd88f5181l_ge_switch_chip_data);
     orion5x_i2c_init();
     orion5x_uart0_init();
 

@@ -40,11 +40,6 @@ static struct dsa_chip_data rd88f6183ap_ge_switch_chip_data = {
     .port_names[5] = "cpu",
 };
 
-static struct dsa_platform_data __initdata rd88f6183ap_ge_switch_plat_data = {
-    .nr_chips = 1,
-    .chip = &rd88f6183ap_ge_switch_chip_data,
-};
-
 static struct mtd_partition rd88f6183ap_ge_partitions[] = {
     {
         .name = "kernel",
@@ -89,7 +84,7 @@ static void __init rd88f6183ap_ge_init(void)
      */
     orion5x_ehci0_init();
     orion5x_eth_init(&rd88f6183ap_ge_eth_data);
-    orion5x_eth_switch_init(&rd88f6183ap_ge_switch_plat_data);
+    orion5x_eth_switch_init(&rd88f6183ap_ge_switch_chip_data);
     spi_register_board_info(rd88f6183ap_ge_spi_slave_info,
                 ARRAY_SIZE(rd88f6183ap_ge_spi_slave_info));
     orion5x_spi_init();

@@ -106,11 +106,6 @@ static struct dsa_chip_data wnr854t_switch_chip_data = {
     .port_names[7] = "lan2",
 };
 
-static struct dsa_platform_data __initdata wnr854t_switch_plat_data = {
-    .nr_chips = 1,
-    .chip = &wnr854t_switch_chip_data,
-};
-
 static void __init wnr854t_init(void)
 {
     /*
@@ -124,7 +119,7 @@ static void __init wnr854t_init(void)
      * Configure peripherals.
      */
     orion5x_eth_init(&wnr854t_eth_data);
-    orion5x_eth_switch_init(&wnr854t_switch_plat_data);
+    orion5x_eth_switch_init(&wnr854t_switch_chip_data);
     orion5x_uart0_init();
 
     mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,

@@ -191,11 +191,6 @@ static struct dsa_chip_data wrt350n_v2_switch_chip_data = {
     .port_names[7] = "lan4",
 };
 
-static struct dsa_platform_data __initdata wrt350n_v2_switch_plat_data = {
-    .nr_chips = 1,
-    .chip = &wrt350n_v2_switch_chip_data,
-};
-
 static void __init wrt350n_v2_init(void)
 {
     /*
@@ -210,7 +205,7 @@ static void __init wrt350n_v2_init(void)
      */
     orion5x_ehci0_init();
     orion5x_eth_init(&wrt350n_v2_eth_data);
-    orion5x_eth_switch_init(&wrt350n_v2_switch_plat_data);
+    orion5x_eth_switch_init(&wrt350n_v2_switch_chip_data);
     orion5x_uart0_init();
 
     mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,

@@ -22,6 +22,7 @@
 #include <linux/platform_data/dma-mv_xor.h>
 #include <linux/platform_data/usb-ehci-orion.h>
 #include <plat/common.h>
+#include <linux/phy.h>
 
 /* Create a clkdev entry for a given device/clk */
 void __init orion_clkdev_add(const char *con_id, const char *dev_id,
@@ -470,15 +471,27 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
 /*****************************************************************************
  * Ethernet switch
  ****************************************************************************/
-void __init orion_ge00_switch_init(struct dsa_platform_data *d)
+static __initconst const char *orion_ge00_mvmdio_bus_name = "orion-mii";
+static __initdata struct mdio_board_info
+          orion_ge00_switch_board_info;
+
+void __init orion_ge00_switch_init(struct dsa_chip_data *d)
 {
-    int i;
+    struct mdio_board_info *bd;
+    unsigned int i;
 
-    d->netdev = &orion_ge00.dev;
-    for (i = 0; i < d->nr_chips; i++)
-        d->chip[i].host_dev = &orion_ge_mvmdio.dev;
+    for (i = 0; i < ARRAY_SIZE(d->port_names); i++)
+        if (!strcmp(d->port_names[i], "cpu"))
+            break;
 
-    platform_device_register_data(NULL, "dsa", 0, d, sizeof(d));
+    bd = &orion_ge00_switch_board_info;
+    bd->bus_id = orion_ge00_mvmdio_bus_name;
+    bd->mdio_addr = d->sw_addr;
+    d->netdev[i] = &orion_ge00.dev;
+    strcpy(bd->modalias, "mv88e6085");
+    bd->platform_data = d;
+
+    mdiobus_register_board_info(&orion_ge00_switch_board_info, 1);
 }
 
 /*****************************************************************************

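The rewrite above stops registering a "dsa" platform device and instead describes the switch as an MDIO board device, so the switch driver can probe it off the MDIO bus. A reduced sketch of that pattern, assuming a bus named "orion-mii" and an mv88e6085-compatible switch (both taken from the diff); demo_register_switch() itself is illustrative:

#include <linux/phy.h>
#include <linux/string.h>

static struct mdio_board_info demo_switch_info;

static void demo_register_switch(const void *pdata, int addr)
{
    demo_switch_info.bus_id = "orion-mii";  /* must match the MDIO bus name */
    demo_switch_info.mdio_addr = addr;      /* switch address on that bus */
    strcpy(demo_switch_info.modalias, "mv88e6085");
    demo_switch_info.platform_data = pdata;
    mdiobus_register_board_info(&demo_switch_info, 1);
}
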
@@ -12,7 +12,7 @@
 #include <linux/mv643xx_eth.h>
 #include <linux/platform_data/usb-ehci-orion.h>
 
-struct dsa_platform_data;
+struct dsa_chip_data;
 struct mv_sata_platform_data;
 
 void __init orion_uart0_init(void __iomem *membase,
@@ -57,7 +57,7 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
                 unsigned long mapbase,
                 unsigned long irq);
 
-void __init orion_ge00_switch_init(struct dsa_platform_data *d);
+void __init orion_ge00_switch_init(struct dsa_chip_data *d);
 
 void __init orion_i2c_init(unsigned long mapbase,
                unsigned long irq,

@@ -12,6 +12,7 @@ config ARM64
     select ARCH_HAS_GCOV_PROFILE_ALL
     select ARCH_HAS_GIGANTIC_PAGE
     select ARCH_HAS_KCOV
+    select ARCH_HAS_SET_MEMORY
     select ARCH_HAS_SG_CHAIN
     select ARCH_HAS_STRICT_KERNEL_RWX
     select ARCH_HAS_STRICT_MODULE_RWX

@@ -813,11 +813,6 @@ static inline void bpf_flush_icache(void *start, void *end)
     flush_icache_range((unsigned long)start, (unsigned long)end);
 }
 
-void bpf_jit_compile(struct bpf_prog *prog)
-{
-    /* Nothing to do here. We support Internal BPF. */
-}
-
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
     struct bpf_prog *tmp, *orig_prog = prog;
@@ -903,7 +898,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
     bpf_flush_icache(header, ctx.image + ctx.idx);
 
-    set_memory_ro((unsigned long)header, header->pages);
+    bpf_jit_binary_lock_ro(header);
     prog->bpf_func = (void *)ctx.image;
     prog->jited = 1;
 
@@ -915,18 +910,3 @@ out:
                tmp : orig_prog);
     return prog;
 }
-
-void bpf_jit_free(struct bpf_prog *prog)
-{
-    unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK;
-    struct bpf_binary_header *header = (void *)addr;
-
-    if (!prog->jited)
-        goto free_filter;
-
-    set_memory_rw(addr, header->pages);
-    bpf_jit_binary_free(header);
-
-free_filter:
-    bpf_prog_unlock_free(prog);
-}

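The arm64 hunk above, and the matching s390 and x86 hunks later in this diff, drop the per-arch bpf_jit_free()/bpf_jit_compile() copies and the open-coded set_memory_ro() in favor of the generic helpers (the "bpf: fix unlocking of jited image when module ronx not set" commit from the shortlog). Roughly what the generic lock helper reduces to in this era's include/linux/filter.h; a sketch for orientation, not a verbatim copy:

static inline void demo_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
    set_memory_ro((unsigned long)hdr, hdr->pages);
#endif
}
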
@@ -124,7 +124,6 @@ static inline void recv_packet(struct net_device *dev)
 
     skb->protocol = eth_type_trans(skb, dev);
     netif_rx(skb);
-    dev->last_rx = jiffies;
     dev->stats.rx_packets++;
     dev->stats.rx_bytes += pktlen;
 

@@ -1061,7 +1061,3 @@ static int __init octeon_publish_devices(void)
     return of_platform_bus_probe(NULL, octeon_ids, NULL);
 }
 arch_initcall(octeon_publish_devices);
-
-MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Platform driver for Octeon SOC");

@@ -961,8 +961,6 @@ common_load:
     return 0;
 }
 
-void bpf_jit_compile(struct bpf_prog *fp) { }
-
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 {
     u32 proglen;
@@ -1066,6 +1064,7 @@ out:
     return fp;
 }
 
+/* Overriding bpf_jit_free() as we don't set images read-only. */
 void bpf_jit_free(struct bpf_prog *fp)
 {
     unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;

@@ -69,6 +69,7 @@ config S390
     select ARCH_HAS_GCOV_PROFILE_ALL
     select ARCH_HAS_GIGANTIC_PAGE
     select ARCH_HAS_KCOV
+    select ARCH_HAS_SET_MEMORY
     select ARCH_HAS_SG_CHAIN
     select ARCH_HAS_STRICT_KERNEL_RWX
     select ARCH_HAS_STRICT_MODULE_RWX

@@ -1262,14 +1262,6 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
     return 0;
 }
 
-/*
- * Classic BPF function stub. BPF programs will be converted into
- * eBPF and then bpf_int_jit_compile() will be called.
- */
-void bpf_jit_compile(struct bpf_prog *fp)
-{
-}
-
 /*
  * Compile eBPF program "fp"
  */
@@ -1335,7 +1327,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
         print_fn_code(jit.prg_buf, jit.size_prg);
     }
     if (jit.prg_buf) {
-        set_memory_ro((unsigned long)header, header->pages);
+        bpf_jit_binary_lock_ro(header);
         fp->bpf_func = (void *) jit.prg_buf;
         fp->jited = 1;
     }
@@ -1347,21 +1339,3 @@ out:
                tmp : orig_fp);
     return fp;
 }
-
-/*
- * Free eBPF program
- */
-void bpf_jit_free(struct bpf_prog *fp)
-{
-    unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
-    struct bpf_binary_header *header = (void *)addr;
-
-    if (!fp->jited)
-        goto free_filter;
-
-    set_memory_rw(addr, header->pages);
-    bpf_jit_binary_free(header);
-
-free_filter:
-    bpf_prog_unlock_free(fp);
-}

@@ -44,6 +44,7 @@ config SPARC
     select CPU_NO_EFFICIENT_FFS
     select HAVE_ARCH_HARDENED_USERCOPY
     select PROVE_LOCKING_SMALL if PROVE_LOCKING
+    select ARCH_WANT_RELAX_ORDER
 
 config SPARC32
     def_bool !64BIT

@@ -53,6 +53,7 @@ config X86
     select ARCH_HAS_KCOV            if X86_64
     select ARCH_HAS_MMIO_FLUSH
     select ARCH_HAS_PMEM_API        if X86_64
+    select ARCH_HAS_SET_MEMORY
     select ARCH_HAS_SG_CHAIN
     select ARCH_HAS_STRICT_KERNEL_RWX
     select ARCH_HAS_STRICT_MODULE_RWX

@@ -1067,13 +1067,13 @@ common_load:
 
         ilen = prog - temp;
         if (ilen > BPF_MAX_INSN_SIZE) {
-            pr_err("bpf_jit_compile fatal insn size error\n");
+            pr_err("bpf_jit: fatal insn size error\n");
             return -EFAULT;
         }
 
         if (image) {
             if (unlikely(proglen + ilen > oldproglen)) {
-                pr_err("bpf_jit_compile fatal error\n");
+                pr_err("bpf_jit: fatal error\n");
                 return -EFAULT;
             }
             memcpy(image + proglen, temp, ilen);
@@ -1085,10 +1085,6 @@ common_load:
     return proglen;
 }
 
-void bpf_jit_compile(struct bpf_prog *prog)
-{
-}
-
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
     struct bpf_binary_header *header = NULL;
@@ -1169,7 +1165,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
     if (image) {
         bpf_flush_icache(header, image + proglen);
-        set_memory_ro((unsigned long)header, header->pages);
+        bpf_jit_binary_lock_ro(header);
         prog->bpf_func = (void *)image;
         prog->jited = 1;
     } else {
@@ -1184,18 +1180,3 @@ out:
                tmp : orig_prog);
     return prog;
 }
-
-void bpf_jit_free(struct bpf_prog *fp)
-{
-    unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
-    struct bpf_binary_header *header = (void *)addr;
-
-    if (!fp->jited)
-        goto free_filter;
-
-    set_memory_rw(addr, header->pages);
-    bpf_jit_binary_free(header);
-
-free_filter:
-    bpf_prog_unlock_free(fp);
-}

@@ -1779,7 +1779,7 @@ static int eni_do_init(struct atm_dev *dev)
         printk(")\n");
     printk(KERN_NOTICE DEV_LABEL "(itf %d): %s,%s\n",dev->number,
         eni_in(MID_RES_ID_MCON) & 0x200 ? "ASIC" : "FPGA",
-        media_name[eni_in(MID_RES_ID_MCON) & DAUGTHER_ID]);
+        media_name[eni_in(MID_RES_ID_MCON) & DAUGHTER_ID]);
 
     error = suni_init(dev);
     if (error)

@@ -2132,12 +2132,8 @@ idt77252_init_est(struct vc_map *vc, int pcr)
 
     est->interval = 2;      /* XXX: make this configurable */
     est->ewma_log = 2;      /* XXX: make this configurable */
-    init_timer(&est->timer);
-    est->timer.data = (unsigned long)vc;
-    est->timer.function = idt77252_est_timer;
-
-    est->timer.expires = jiffies + ((HZ / 4) << est->interval);
-    add_timer(&est->timer);
+    setup_timer(&est->timer, idt77252_est_timer, (unsigned long)vc);
+    mod_timer(&est->timer, jiffies + ((HZ / 4) << est->interval));
 
     return est;
 }
@@ -3638,9 +3634,7 @@ static int idt77252_init_one(struct pci_dev *pcidev,
     spin_lock_init(&card->cmd_lock);
     spin_lock_init(&card->tst_lock);
 
-    init_timer(&card->tst_timer);
-    card->tst_timer.data = (unsigned long)card;
-    card->tst_timer.function = tst_timer;
+    setup_timer(&card->tst_timer, tst_timer, (unsigned long)card);
 
     /* Do the I/O remapping... */
     card->membase = ioremap(membase, 1024);

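Both conversions above collapse the open-coded init_timer()/.data/.function/.expires/add_timer() sequence into setup_timer() plus mod_timer(). The same refactor in isolation, using this era's unsigned-long-cookie timer API; all names are illustrative:

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list demo_timer;

static void demo_timeout(unsigned long data)
{
    /* 'data' is the cookie passed to setup_timer() below */
}

static void demo_start(void *cookie)
{
    setup_timer(&demo_timer, demo_timeout, (unsigned long)cookie);
    /* mod_timer() sets the expiry and (re)arms the timer in one call */
    mod_timer(&demo_timer, jiffies + HZ / 4);
}
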
@@ -56,7 +56,7 @@
 #define MID_CON_SUNI    0x00000040  /* 0: UTOPIA; 1: SUNI */
 #define MID_CON_V6  0x00000020  /* 0: non-pipel UTOPIA (required iff
                        !CON_SUNI; 1: UTOPIA */
-#define DAUGTHER_ID 0x0000001f  /* daugther board id */
+#define DAUGHTER_ID 0x0000001f  /* daughter board id */
 
 /*
  * Interrupt Status Acknowledge, Interrupt Status & Interrupt Enable

@@ -136,17 +136,17 @@ static bool bcma_is_core_needed_early(u16 core_id)
     return false;
 }
 
-static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
+static struct device_node *bcma_of_find_child_device(struct device *parent,
                              struct bcma_device *core)
 {
     struct device_node *node;
     u64 size;
     const __be32 *reg;
 
-    if (!parent || !parent->dev.of_node)
+    if (!parent->of_node)
         return NULL;
 
-    for_each_child_of_node(parent->dev.of_node, node) {
+    for_each_child_of_node(parent->of_node, node) {
         reg = of_get_address(node, 0, &size, NULL);
         if (!reg)
             continue;
@@ -156,7 +156,7 @@ static struct device_node *bcma_of_find_child_device(struct device *parent,
     return NULL;
 }
 
-static int bcma_of_irq_parse(struct platform_device *parent,
+static int bcma_of_irq_parse(struct device *parent,
                  struct bcma_device *core,
                  struct of_phandle_args *out_irq, int num)
 {
@@ -169,7 +169,7 @@ static int bcma_of_irq_parse(struct device *parent,
             return rc;
     }
 
-    out_irq->np = parent->dev.of_node;
+    out_irq->np = parent->of_node;
     out_irq->args_count = 1;
     out_irq->args[0] = num;
 
@@ -177,13 +177,13 @@ static int bcma_of_irq_parse(struct device *parent,
     return of_irq_parse_raw(laddr, out_irq);
 }
 
-static unsigned int bcma_of_get_irq(struct platform_device *parent,
+static unsigned int bcma_of_get_irq(struct device *parent,
                     struct bcma_device *core, int num)
 {
     struct of_phandle_args out_irq;
     int ret;
 
-    if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
+    if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent->of_node)
         return 0;
 
     ret = bcma_of_irq_parse(parent, core, &out_irq, num);
@@ -196,7 +196,7 @@ static unsigned int bcma_of_get_irq(struct device *parent,
     return irq_create_of_mapping(&out_irq);
 }
 
-static void bcma_of_fill_device(struct platform_device *parent,
+static void bcma_of_fill_device(struct device *parent,
                 struct bcma_device *core)
 {
     struct device_node *node;
@@ -227,7 +227,7 @@ unsigned int bcma_core_irq(struct bcma_device *core, int num)
             return mips_irq <= 4 ? mips_irq + 2 : 0;
         }
         if (bus->host_pdev)
-            return bcma_of_get_irq(bus->host_pdev, core, num);
+            return bcma_of_get_irq(&bus->host_pdev->dev, core, num);
         return 0;
     case BCMA_HOSTTYPE_SDIO:
         return 0;
@@ -253,7 +253,8 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
     if (IS_ENABLED(CONFIG_OF) && bus->host_pdev) {
         core->dma_dev = &bus->host_pdev->dev;
         core->dev.parent = &bus->host_pdev->dev;
-        bcma_of_fill_device(bus->host_pdev, core);
+        if (core->dev.parent)
+            bcma_of_fill_device(core->dev.parent, core);
     } else {
         core->dev.dma_mask = &core->dev.coherent_dma_mask;
         core->dma_dev = &core->dev;

@@ -633,8 +634,11 @@ static int bcma_device_probe(struct device *dev)
                            drv);
     int err = 0;
 
+    get_device(dev);
     if (adrv->probe)
         err = adrv->probe(core);
+    if (err)
+        put_device(dev);
 
     return err;
 }
@@ -647,6 +651,7 @@ static int bcma_device_remove(struct device *dev)
 
     if (adrv->remove)
         adrv->remove(core);
+    put_device(dev);
 
     return 0;
 }

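The probe/remove hunks above pin the struct device for as long as a driver is bound: take a reference before probe, drop it on probe failure or at remove. The discipline in miniature; do_probe()/do_remove() are stand-ins for the bus hooks:

#include <linux/device.h>

static int do_probe(struct device *dev) { return 0; }   /* stand-in */
static void do_remove(struct device *dev) { }           /* stand-in */

static int demo_probe(struct device *dev)
{
    int err;

    get_device(dev);            /* hold while bound */
    err = do_probe(dev);
    if (err)
        put_device(dev);        /* failed bind: release now */
    return err;
}

static void demo_remove(struct device *dev)
{
    do_remove(dev);
    put_device(dev);            /* balances get_device() in probe */
}
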
@@ -344,7 +344,7 @@ config BT_WILINK
 
 config BT_QCOMSMD
     tristate "Qualcomm SMD based HCI support"
-    depends on QCOM_SMD && QCOM_WCNSS_CTRL
+    depends on (QCOM_SMD && QCOM_WCNSS_CTRL) || COMPILE_TEST
     select BT_QCA
     help
       Qualcomm SMD based HCI driver.

@@ -94,6 +94,7 @@ static const struct usb_device_id ath3k_table[] = {
     { USB_DEVICE(0x04CA, 0x300f) },
     { USB_DEVICE(0x04CA, 0x3010) },
     { USB_DEVICE(0x04CA, 0x3014) },
+    { USB_DEVICE(0x04CA, 0x3018) },
     { USB_DEVICE(0x0930, 0x0219) },
     { USB_DEVICE(0x0930, 0x021c) },
     { USB_DEVICE(0x0930, 0x0220) },
@@ -162,6 +163,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
     { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
     { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
     { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
+    { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 },
     { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
     { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
     { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },

@@ -178,6 +178,9 @@ static int btbcm_reset(struct hci_dev *hdev)
     }
     kfree_skb(skb);
 
+    /* 100 msec delay for module to complete reset process */
+    msleep(100);
+
     return 0;
 }
 

@@ -502,7 +502,7 @@ static int btmrvl_download_cal_data(struct btmrvl_private *priv,
     ret = btmrvl_send_sync_cmd(priv, BT_CMD_LOAD_CONFIG_DATA, data,
                    BT_CAL_HDR_LEN + len);
     if (ret)
-        BT_ERR("Failed to download caibration data");
+        BT_ERR("Failed to download calibration data");
 
     return 0;
 }

@@ -97,11 +97,11 @@ static int btmrvl_sdio_probe_of(struct device *dev,
         cfg->irq_bt = irq_of_parse_and_map(card->plt_of_node, 0);
         if (!cfg->irq_bt) {
             dev_err(dev, "fail to parse irq_bt from device tree");
             cfg->irq_bt = -1;
         } else {
             ret = devm_request_irq(dev, cfg->irq_bt,
                            btmrvl_wake_irq_bt,
-                           IRQF_TRIGGER_LOW,
-                           "bt_wake", cfg);
+                           0, "bt_wake", cfg);
             if (ret) {
                 dev_err(dev,
                     "Failed to request irq_bt %d (%d)\n",
@@ -1624,7 +1624,7 @@ static int btmrvl_sdio_suspend(struct device *dev)
 
     if (priv->adapter->hs_state != HS_ACTIVATED) {
         if (btmrvl_enable_hs(priv)) {
-            BT_ERR("HS not actived, suspend failed!");
+            BT_ERR("HS not activated, suspend failed!");
             priv->adapter->is_suspending = false;
             return -EBUSY;
         }
@@ -1682,8 +1682,12 @@ static int btmrvl_sdio_resume(struct device *dev)
     /* Disable platform specific wakeup interrupt */
     if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0) {
         disable_irq_wake(card->plt_wake_cfg->irq_bt);
-        if (!card->plt_wake_cfg->wake_by_bt)
-            disable_irq(card->plt_wake_cfg->irq_bt);
+        disable_irq(card->plt_wake_cfg->irq_bt);
+        if (card->plt_wake_cfg->wake_by_bt)
+            /* Undo our disable, since interrupt handler already
+             * did this.
+             */
+            enable_irq(card->plt_wake_cfg->irq_bt);
     }
 
     return 0;

@@ -165,6 +165,7 @@ static const struct of_device_id btqcomsmd_of_match[] = {
     { .compatible = "qcom,wcnss-bt", },
     { },
 };
+MODULE_DEVICE_TABLE(of, btqcomsmd_of_match);
 
 static struct platform_driver btqcomsmd_driver = {
     .probe = btqcomsmd_probe,

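The one-line addition above matters for module autoloading: MODULE_DEVICE_TABLE(of, ...) embeds the OF match table in the module's alias information, letting userspace load the driver when a matching device-tree node appears. A self-contained sketch with a made-up compatible string:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id demo_of_match[] = {
    { .compatible = "vendor,demo-device", },    /* hypothetical */
    { },
};
MODULE_DEVICE_TABLE(of, demo_of_match);

static struct platform_driver demo_driver = {
    .driver = {
        .name = "demo",
        .of_match_table = demo_of_match,
    },
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");
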
@@ -24,6 +24,8 @@
 #include <linux/module.h>
 #include <linux/usb.h>
 #include <linux/firmware.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
 #include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
@@ -130,6 +132,10 @@ static const struct usb_device_id btusb_table[] = {
     /* Broadcom BCM43142A0 (Foxconn/Lenovo) */
     { USB_DEVICE(0x105b, 0xe065), .driver_info = BTUSB_BCM_PATCHRAM },
 
+    /* Broadcom BCM920703 (HTC Vive) */
+    { USB_VENDOR_AND_INTERFACE_INFO(0x0bb4, 0xff, 0x01, 0x01),
+      .driver_info = BTUSB_BCM_PATCHRAM },
+
     /* Foxconn - Hon Hai */
     { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01),
       .driver_info = BTUSB_BCM_PATCHRAM },
@@ -154,6 +160,10 @@ static const struct usb_device_id btusb_table[] = {
     { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01),
       .driver_info = BTUSB_BCM_PATCHRAM },
 
+    /* Dell Computer - Broadcom based */
+    { USB_VENDOR_AND_INTERFACE_INFO(0x413c, 0xff, 0x01, 0x01),
+      .driver_info = BTUSB_BCM_PATCHRAM },
+
     /* Toshiba Corp - Broadcom based */
     { USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01),
       .driver_info = BTUSB_BCM_PATCHRAM },
@@ -209,6 +219,7 @@ static const struct usb_device_id blacklist_table[] = {
     { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
     { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
     { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
+    { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 },
     { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
     { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
     { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@@ -369,6 +380,7 @@ static const struct usb_device_id blacklist_table[] = {
 #define BTUSB_BOOTING       9
 #define BTUSB_RESET_RESUME  10
 #define BTUSB_DIAG_RUNNING  11
+#define BTUSB_OOB_WAKE_ENABLED  12
 
 struct btusb_data {
     struct hci_dev       *hdev;
@@ -416,6 +428,8 @@ struct btusb_data {
     int (*recv_bulk)(struct btusb_data *data, void *buffer, int count);
 
     int (*setup_on_usb)(struct hci_dev *hdev);
+
+    int oob_wake_irq;   /* irq for out-of-band wake-on-bt */
 };
 
 static inline void btusb_free_frags(struct btusb_data *data)
@@ -2338,6 +2352,50 @@ static int btusb_shutdown_intel(struct hci_dev *hdev)
     return 0;
 }
 
+#ifdef CONFIG_PM
+/* Configure an out-of-band gpio as wake-up pin, if specified in device tree */
+static int marvell_config_oob_wake(struct hci_dev *hdev)
+{
+    struct sk_buff *skb;
+    struct btusb_data *data = hci_get_drvdata(hdev);
+    struct device *dev = &data->udev->dev;
+    u16 pin, gap, opcode;
+    int ret;
+    u8 cmd[5];
+
+    /* Move on if no wakeup pin specified */
+    if (of_property_read_u16(dev->of_node, "marvell,wakeup-pin", &pin) ||
+        of_property_read_u16(dev->of_node, "marvell,wakeup-gap-ms", &gap))
+        return 0;
+
+    /* Vendor specific command to configure a GPIO as wake-up pin */
+    opcode = hci_opcode_pack(0x3F, 0x59);
+    cmd[0] = opcode & 0xFF;
+    cmd[1] = opcode >> 8;
+    cmd[2] = 2; /* length of parameters that follow */
+    cmd[3] = pin;
+    cmd[4] = gap; /* time in ms, for which wakeup pin should be asserted */
+
+    skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
+    if (!skb) {
+        bt_dev_err(hdev, "%s: No memory\n", __func__);
+        return -ENOMEM;
+    }
+
+    memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
+    hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
+
+    ret = btusb_send_frame(hdev, skb);
+    if (ret) {
+        bt_dev_err(hdev, "%s: configuration failed\n", __func__);
+        kfree_skb(skb);
+        return ret;
+    }
+
+    return 0;
+}
+#endif
+
 static int btusb_set_bdaddr_marvell(struct hci_dev *hdev,
                     const bdaddr_t *bdaddr)
 {
@@ -2728,6 +2786,66 @@ static int btusb_bcm_set_diag(struct hci_dev *hdev, bool enable)
 }
 #endif
 
+#ifdef CONFIG_PM
+static irqreturn_t btusb_oob_wake_handler(int irq, void *priv)
+{
+    struct btusb_data *data = priv;
+
+    pm_wakeup_event(&data->udev->dev, 0);
+
+    /* Disable only if not already disabled (keep it balanced) */
+    if (test_and_clear_bit(BTUSB_OOB_WAKE_ENABLED, &data->flags)) {
+        disable_irq_nosync(irq);
+        disable_irq_wake(irq);
+    }
+    return IRQ_HANDLED;
+}
+
+static const struct of_device_id btusb_match_table[] = {
+    { .compatible = "usb1286,204e" },
+    { }
+};
+MODULE_DEVICE_TABLE(of, btusb_match_table);
+
+/* Use an oob wakeup pin? */
+static int btusb_config_oob_wake(struct hci_dev *hdev)
+{
+    struct btusb_data *data = hci_get_drvdata(hdev);
+    struct device *dev = &data->udev->dev;
+    int irq, ret;
+
+    clear_bit(BTUSB_OOB_WAKE_ENABLED, &data->flags);
+
+    if (!of_match_device(btusb_match_table, dev))
+        return 0;
+
+    /* Move on if no IRQ specified */
+    irq = of_irq_get_byname(dev->of_node, "wakeup");
+    if (irq <= 0) {
+        bt_dev_dbg(hdev, "%s: no OOB Wakeup IRQ in DT", __func__);
+        return 0;
+    }
+
+    ret = devm_request_irq(&hdev->dev, irq, btusb_oob_wake_handler,
+                   0, "OOB Wake-on-BT", data);
+    if (ret) {
+        bt_dev_err(hdev, "%s: IRQ request failed", __func__);
+        return ret;
+    }
+
+    ret = device_init_wakeup(dev, true);
+    if (ret) {
+        bt_dev_err(hdev, "%s: failed to init_wakeup", __func__);
+        return ret;
+    }
+
+    data->oob_wake_irq = irq;
+    disable_irq(irq);
+    bt_dev_info(hdev, "OOB Wake-on-BT configured at IRQ %u", irq);
+    return 0;
+}
+#endif
+
 static int btusb_probe(struct usb_interface *intf,
                const struct usb_device_id *id)
 {
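The wake-IRQ plumbing above keeps enable/disable strictly paired by letting a flag bit own the armed state: whichever of the IRQ handler or the resume path clears the bit first performs the disable. The idiom reduced to its core; all names here are illustrative:

#include <linux/interrupt.h>
#include <linux/bitops.h>

#define DEMO_WAKE_ENABLED   0
static unsigned long demo_flags;

static void demo_arm(int irq)
{
    set_bit(DEMO_WAKE_ENABLED, &demo_flags);
    enable_irq_wake(irq);
    enable_irq(irq);
}

static void demo_disarm(int irq)
{
    /* no-op if the IRQ handler already disarmed it */
    if (test_and_clear_bit(DEMO_WAKE_ENABLED, &demo_flags)) {
        disable_irq_nosync(irq);
        disable_irq_wake(irq);
    }
}
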
@@ -2849,6 +2967,18 @@ static int btusb_probe(struct usb_interface *intf,
     hdev->send   = btusb_send_frame;
     hdev->notify = btusb_notify;
 
+#ifdef CONFIG_PM
+    err = btusb_config_oob_wake(hdev);
+    if (err)
+        goto out_free_dev;
+
+    /* Marvell devices may need a specific chip configuration */
+    if (id->driver_info & BTUSB_MARVELL && data->oob_wake_irq) {
+        err = marvell_config_oob_wake(hdev);
+        if (err)
+            goto out_free_dev;
+    }
+#endif
     if (id->driver_info & BTUSB_CW6622)
         set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
 
@@ -2991,18 +3121,15 @@ static int btusb_probe(struct usb_interface *intf,
         err = usb_set_interface(data->udev, 0, 0);
         if (err < 0) {
             BT_ERR("failed to set interface 0, alt 0 %d", err);
-            hci_free_dev(hdev);
-            return err;
+            goto out_free_dev;
         }
     }
 
     if (data->isoc) {
         err = usb_driver_claim_interface(&btusb_driver,
                          data->isoc, data);
-        if (err < 0) {
-            hci_free_dev(hdev);
-            return err;
-        }
+        if (err < 0)
+            goto out_free_dev;
     }
 
 #ifdef CONFIG_BT_HCIBTUSB_BCM
@@ -3016,14 +3143,16 @@ static int btusb_probe(struct usb_interface *intf,
 #endif
 
     err = hci_register_dev(hdev);
-    if (err < 0) {
-        hci_free_dev(hdev);
-        return err;
-    }
+    if (err < 0)
+        goto out_free_dev;
 
     usb_set_intfdata(intf, data);
 
     return 0;
+
+out_free_dev:
+    hci_free_dev(hdev);
+    return err;
 }
 
 static void btusb_disconnect(struct usb_interface *intf)
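The two probe hunks above replace repeated free-and-return blocks with a single out_free_dev label, which is the shape the function ends up with. A minimal sketch of that unwind structure; demo_step() is a hypothetical stand-in for the setup calls:

#include <net/bluetooth/hci_core.h>

static int demo_step(struct hci_dev *hdev) { return 0; }    /* stand-in */

static int demo_probe(void)
{
    struct hci_dev *hdev;
    int err;

    hdev = hci_alloc_dev();
    if (!hdev)
        return -ENOMEM;

    err = demo_step(hdev);
    if (err < 0)
        goto out_free_dev;

    err = demo_step(hdev);
    if (err < 0)
        goto out_free_dev;

    return 0;

out_free_dev:
    hci_free_dev(hdev);
    return err;
}
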
@@ -3062,6 +3191,9 @@ static void btusb_disconnect(struct usb_interface *intf)
         usb_driver_release_interface(&btusb_driver, data->isoc);
     }
 
+    if (data->oob_wake_irq)
+        device_init_wakeup(&data->udev->dev, false);
+
     hci_free_dev(hdev);
 }
 
@@ -3090,6 +3222,12 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
     btusb_stop_traffic(data);
     usb_kill_anchored_urbs(&data->tx_anchor);
 
+    if (data->oob_wake_irq && device_may_wakeup(&data->udev->dev)) {
+        set_bit(BTUSB_OOB_WAKE_ENABLED, &data->flags);
+        enable_irq_wake(data->oob_wake_irq);
+        enable_irq(data->oob_wake_irq);
+    }
+
     /* Optionally request a device reset on resume, but only when
      * wakeups are disabled. If wakeups are enabled we assume the
      * device will stay powered up throughout suspend.
@@ -3127,6 +3265,12 @@ static int btusb_resume(struct usb_interface *intf)
     if (--data->suspend_count)
         return 0;
 
+    /* Disable only if not already disabled (keep it balanced) */
+    if (test_and_clear_bit(BTUSB_OOB_WAKE_ENABLED, &data->flags)) {
+        disable_irq(data->oob_wake_irq);
+        disable_irq_wake(data->oob_wake_irq);
+    }
+
     if (!test_bit(HCI_RUNNING, &hdev->flags))
         goto done;

@@ -618,14 +618,25 @@ unlock:
 }
 #endif
 
-static const struct acpi_gpio_params device_wakeup_gpios = { 0, 0, false };
-static const struct acpi_gpio_params shutdown_gpios = { 1, 0, false };
-static const struct acpi_gpio_params host_wakeup_gpios = { 2, 0, false };
-
-static const struct acpi_gpio_mapping acpi_bcm_default_gpios[] = {
-    { "device-wakeup-gpios", &device_wakeup_gpios, 1 },
-    { "shutdown-gpios", &shutdown_gpios, 1 },
-    { "host-wakeup-gpios", &host_wakeup_gpios, 1 },
+static const struct acpi_gpio_params int_last_device_wakeup_gpios = { 0, 0, false };
+static const struct acpi_gpio_params int_last_shutdown_gpios = { 1, 0, false };
+static const struct acpi_gpio_params int_last_host_wakeup_gpios = { 2, 0, false };
+
+static const struct acpi_gpio_mapping acpi_bcm_int_last_gpios[] = {
+    { "device-wakeup-gpios", &int_last_device_wakeup_gpios, 1 },
+    { "shutdown-gpios", &int_last_shutdown_gpios, 1 },
+    { "host-wakeup-gpios", &int_last_host_wakeup_gpios, 1 },
+    { },
+};
+
+static const struct acpi_gpio_params int_first_host_wakeup_gpios = { 0, 0, false };
+static const struct acpi_gpio_params int_first_device_wakeup_gpios = { 1, 0, false };
+static const struct acpi_gpio_params int_first_shutdown_gpios = { 2, 0, false };
+
+static const struct acpi_gpio_mapping acpi_bcm_int_first_gpios[] = {
+    { "device-wakeup-gpios", &int_first_device_wakeup_gpios, 1 },
+    { "shutdown-gpios", &int_first_shutdown_gpios, 1 },
+    { "host-wakeup-gpios", &int_first_host_wakeup_gpios, 1 },
     { },
 };
 
@@ -692,12 +703,19 @@ static int bcm_acpi_probe(struct bcm_device *dev)
     struct platform_device *pdev = dev->pdev;
     LIST_HEAD(resources);
     const struct dmi_system_id *dmi_id;
+    const struct acpi_gpio_mapping *gpio_mapping = acpi_bcm_int_last_gpios;
+    const struct acpi_device_id *id;
     int ret;
 
-    /* Retrieve GPIO data */
     dev->name = dev_name(&pdev->dev);
 
+    /* Retrieve GPIO data */
+    id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
+    if (id)
+        gpio_mapping = (const struct acpi_gpio_mapping *) id->driver_data;
+
     ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
-                    acpi_bcm_default_gpios);
+                    gpio_mapping);
     if (ret)
         return ret;
 
@@ -822,20 +840,22 @@ static const struct hci_uart_proto bcm_proto = {
 
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id bcm_acpi_match[] = {
-    { "BCM2E1A", 0 },
-    { "BCM2E39", 0 },
-    { "BCM2E3A", 0 },
-    { "BCM2E3D", 0 },
-    { "BCM2E3F", 0 },
-    { "BCM2E40", 0 },
-    { "BCM2E54", 0 },
-    { "BCM2E55", 0 },
-    { "BCM2E64", 0 },
-    { "BCM2E65", 0 },
-    { "BCM2E67", 0 },
-    { "BCM2E71", 0 },
-    { "BCM2E7B", 0 },
-    { "BCM2E7C", 0 },
+    { "BCM2E1A", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+    { "BCM2E39", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+    { "BCM2E3A", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+    { "BCM2E3D", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+    { "BCM2E3F", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+    { "BCM2E40", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+    { "BCM2E54", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+    { "BCM2E55", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+    { "BCM2E64", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+    { "BCM2E65", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+    { "BCM2E67", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+    { "BCM2E71", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+    { "BCM2E7B", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+    { "BCM2E7C", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+    { "BCM2E95", (kernel_ulong_t)&acpi_bcm_int_first_gpios },
+    { "BCM2E96", (kernel_ulong_t)&acpi_bcm_int_first_gpios },
     { },
 };
 MODULE_DEVICE_TABLE(acpi, bcm_acpi_match);

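Each ACPI ID above now carries its GPIO mapping table in driver_data, so probe no longer hardcodes one layout. The dispatch step in isolation, following the shape used by bcm_acpi_probe(); the demo wrapper itself is illustrative:

#include <linux/acpi.h>
#include <linux/platform_device.h>

static const struct acpi_gpio_mapping *
demo_pick_mapping(struct platform_device *pdev,
          const struct acpi_gpio_mapping *fallback)
{
    const struct acpi_device_id *id;

    id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
    if (id && id->driver_data)
        return (const struct acpi_gpio_mapping *)id->driver_data;
    return fallback;    /* no per-device table: keep the default */
}
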
@@ -335,7 +335,7 @@ static void hci_ibs_tx_idle_timeout(unsigned long arg)
         /* Fall through */
 
     default:
-        BT_ERR("Spurrious timeout tx state %d", qca->tx_ibs_state);
+        BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
         break;
     }
 
@@ -373,7 +373,7 @@ static void hci_ibs_wake_retrans_timeout(unsigned long arg)
         /* Fall through */
 
     default:
-        BT_ERR("Spurrious timeout tx state %d", qca->tx_ibs_state);
+        BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
         break;
     }
 

@@ -2467,14 +2467,12 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
     struct net_device *dev;
 
     prio = rt_tos2priority(tos);
-    dev = ndev->priv_flags & IFF_802_1Q_VLAN ?
-        vlan_dev_real_dev(ndev) : ndev;
-
+    dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
     if (dev->num_tc)
         return netdev_get_prio_tc_map(dev, prio);
 
 #if IS_ENABLED(CONFIG_VLAN_8021Q)
-    if (ndev->priv_flags & IFF_802_1Q_VLAN)
+    if (is_vlan_dev(ndev))
         return (vlan_dev_get_egress_qos_mask(ndev, prio) &
             VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
 #endif

@@ -689,7 +689,7 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
     struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
     struct mlx5_ib_cq *cq = to_mcq(ibcq);
-    void __iomem *uar_page = mdev->priv.uuari.uars[0].map;
+    void __iomem *uar_page = mdev->priv.uar->map;
     unsigned long irq_flags;
     int ret = 0;
 
@@ -704,9 +704,7 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
     mlx5_cq_arm(&cq->mcq,
             (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
             MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
-            uar_page,
-            MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock),
-            to_mcq(ibcq)->mcq.cons_index);
+            uar_page, to_mcq(ibcq)->mcq.cons_index);
 
     return ret;
 }
@@ -790,7 +788,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
     MLX5_SET(cqc, cqc, log_page_size,
          page_shift - MLX5_ADAPTER_PAGE_SHIFT);
 
-    *index = to_mucontext(context)->uuari.uars[0].index;
+    *index = to_mucontext(context)->bfregi.sys_pages[0];
 
     if (ucmd.cqe_comp_en == 1) {
         if (unlikely((*cqe_size != 64) ||
@@ -886,7 +884,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
     MLX5_SET(cqc, cqc, log_page_size,
          cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
 
-    *index = dev->mdev->priv.uuari.uars[0].index;
+    *index = dev->mdev->priv.uar->index;
 
     return 0;
 

@@ -53,6 +53,7 @@
 #include <linux/in.h>
 #include <linux/etherdevice.h>
 #include <linux/mlx5/fs.h>
+#include <linux/mlx5/vport.h>
 #include "mlx5_ib.h"
 
 #define DRIVER_NAME "mlx5_ib"
@@ -672,17 +673,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
             1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
     }
 
-    if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
-            uhw->outlen)) {
-        resp.mlx5_ib_support_multi_pkt_send_wqes =
-            MLX5_CAP_ETH(mdev, multi_pkt_send_wqe);
-        resp.response_length +=
-            sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
-    }
-
-    if (field_avail(typeof(resp), reserved, uhw->outlen))
-        resp.response_length += sizeof(resp.reserved);
-
     if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
         resp.cqe_comp_caps.max_num =
             MLX5_CAP_GEN(dev->mdev, cqe_compression) ?
@@ -706,6 +696,17 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
         resp.response_length += sizeof(resp.packet_pacing_caps);
     }
 
+    if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
+            uhw->outlen)) {
+        resp.mlx5_ib_support_multi_pkt_send_wqes =
+            MLX5_CAP_ETH(mdev, multi_pkt_send_wqe);
+        resp.response_length +=
+            sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
+    }
+
+    if (field_avail(typeof(resp), reserved, uhw->outlen))
+        resp.response_length += sizeof(resp.reserved);
+
     if (uhw->outlen) {
         err = ib_copy_to_udata(uhw, &resp, resp.response_length);
 
@@ -992,6 +993,86 @@ out:
     return err;
 }
 
+static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
+{
+    mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
+            caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
+}
+
+static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
+                 struct mlx5_ib_alloc_ucontext_req_v2 *req,
+                 u32 *num_sys_pages)
+{
+    int uars_per_sys_page;
+    int bfregs_per_sys_page;
+    int ref_bfregs = req->total_num_bfregs;
+
+    if (req->total_num_bfregs == 0)
+        return -EINVAL;
+
+    BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
+    BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);
+
+    if (req->total_num_bfregs > MLX5_MAX_BFREGS)
+        return -ENOMEM;
+
+    uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
+    bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
+    req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
+    *num_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
+
+    if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
+        return -EINVAL;
+
+    mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, using %d sys pages\n",
+            MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
+            lib_uar_4k ? "yes" : "no", ref_bfregs,
+            req->total_num_bfregs, *num_sys_pages);
+
+    return 0;
+}
+
+static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
+{
+    struct mlx5_bfreg_info *bfregi;
+    int err;
+    int i;
+
+    bfregi = &context->bfregi;
+    for (i = 0; i < bfregi->num_sys_pages; i++) {
+        err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
+        if (err)
+            goto error;
+
+        mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
+    }
+    return 0;
+
+error:
+    for (--i; i >= 0; i--)
+        if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
+            mlx5_ib_warn(dev, "failed to free uar %d\n", i);
+
+    return err;
+}
+
+static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
+{
+    struct mlx5_bfreg_info *bfregi;
+    int err;
+    int i;
+
+    bfregi = &context->bfregi;
+    for (i = 0; i < bfregi->num_sys_pages; i++) {
+        err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
+        if (err) {
+            mlx5_ib_warn(dev, "failed to free uar %d\n", i);
+            return err;
+        }
+    }
+    return 0;
+}
+
 static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
                           struct ib_udata *udata)
 {
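allocate_uars() above uses the standard partial-failure rollback: on an error at index i, free exactly the entries 0..i-1 that were committed. The shape in isolation, with stand-in allocators:

static int demo_alloc_one(int i) { return 0; }      /* stand-in */
static void demo_free_one(int i) { }                /* stand-in */

static int demo_alloc_all(int n)
{
    int i, err;

    for (i = 0; i < n; i++) {
        err = demo_alloc_one(i);
        if (err)
            goto error;
    }
    return 0;

error:
    for (--i; i >= 0; i--)      /* unwind only what succeeded */
        demo_free_one(i);
    return err;
}
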
@@ -999,17 +1080,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
     struct mlx5_ib_alloc_ucontext_req_v2 req = {};
     struct mlx5_ib_alloc_ucontext_resp resp = {};
     struct mlx5_ib_ucontext *context;
-    struct mlx5_uuar_info *uuari;
-    struct mlx5_uar *uars;
-    int gross_uuars;
-    int num_uars;
+    struct mlx5_bfreg_info *bfregi;
     int ver;
-    int uuarn;
     int err;
-    int i;
     size_t reqlen;
     size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
                      max_cqe_version);
+    bool lib_uar_4k;
 
     if (!dev->ib_active)
         return ERR_PTR(-EAGAIN);
@@ -1032,27 +1109,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
     if (req.flags)
         return ERR_PTR(-EINVAL);
 
-    if (req.total_num_uuars > MLX5_MAX_UUARS)
-        return ERR_PTR(-ENOMEM);
-
-    if (req.total_num_uuars == 0)
-        return ERR_PTR(-EINVAL);
-
     if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
         return ERR_PTR(-EOPNOTSUPP);
 
     if (reqlen > sizeof(req) &&
         !ib_is_udata_cleared(udata, sizeof(req),
                  reqlen - sizeof(req)))
         return ERR_PTR(-EOPNOTSUPP);
 
-    req.total_num_uuars = ALIGN(req.total_num_uuars,
-                    MLX5_NON_FP_BF_REGS_PER_PAGE);
-    if (req.num_low_latency_uuars > req.total_num_uuars - 1)
+    req.total_num_bfregs = ALIGN(req.total_num_bfregs,
+                     MLX5_NON_FP_BFREGS_PER_UAR);
+    if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
         return ERR_PTR(-EINVAL);
 
-    num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
-    gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
     resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
     if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
         resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
@@ -1065,6 +1129,10 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
     resp.cqe_version = min_t(__u8,
                  (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
                  req.max_cqe_version);
+    resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
+                MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
+    resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
+                MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
     resp.response_length = min(offsetof(typeof(resp), response_length) +
                    sizeof(resp.response_length), udata->outlen);
 
@@ -1072,58 +1140,58 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
     if (!context)
         return ERR_PTR(-ENOMEM);
 
-    uuari = &context->uuari;
-    mutex_init(&uuari->lock);
-    uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL);
-    if (!uars) {
+    lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
+    bfregi = &context->bfregi;
+
+    /* updates req->total_num_bfregs */
+    err = calc_total_bfregs(dev, lib_uar_4k, &req, &bfregi->num_sys_pages);
+    if (err)
+        goto out_ctx;
+
+    mutex_init(&bfregi->lock);
+    bfregi->lib_uar_4k = lib_uar_4k;
+    bfregi->count = kcalloc(req.total_num_bfregs, sizeof(*bfregi->count),
+                GFP_KERNEL);
+    if (!bfregi->count) {
         err = -ENOMEM;
         goto out_ctx;
     }
 
-    uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
-                sizeof(*uuari->bitmap),
-                GFP_KERNEL);
-    if (!uuari->bitmap) {
+    bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
+                    sizeof(*bfregi->sys_pages),
+                    GFP_KERNEL);
+    if (!bfregi->sys_pages) {
         err = -ENOMEM;
-        goto out_uar_ctx;
-    }
-    /*
-     * clear all fast path uuars
-     */
-    for (i = 0; i < gross_uuars; i++) {
-        uuarn = i & 3;
-        if (uuarn == 2 || uuarn == 3)
-            set_bit(i, uuari->bitmap);
+        goto out_count;
     }
 
-    uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
-    if (!uuari->count) {
-        err = -ENOMEM;
-        goto out_bitmap;
-    }
-
-    for (i = 0; i < num_uars; i++) {
-        err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index);
-        if (err)
-            goto out_count;
-    }
+    err = allocate_uars(dev, context);
+    if (err)
+        goto out_sys_pages;
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
     context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
 #endif
 
+    context->upd_xlt_page = __get_free_page(GFP_KERNEL);
+    if (!context->upd_xlt_page) {
+        err = -ENOMEM;
+        goto out_uars;
+    }
+    mutex_init(&context->upd_xlt_page_mutex);
+
     if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) {
         err = mlx5_core_alloc_transport_domain(dev->mdev,
                                &context->tdn);
         if (err)
-            goto out_uars;
+            goto out_page;
     }
 
     INIT_LIST_HEAD(&context->vma_private_list);
     INIT_LIST_HEAD(&context->db_page_list);
     mutex_init(&context->db_page_mutex);
 
-    resp.tot_uuars = req.total_num_uuars;
+    resp.tot_bfregs = req.total_num_bfregs;
     resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
 
     if (field_avail(typeof(resp), cqe_version, udata->outlen))
@@ -1135,32 +1203,46 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
         resp.response_length += sizeof(resp.cmds_supp_uhw);
     }
 
+    if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
+        if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
+            mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
+            resp.eth_min_inline++;
+        }
+        resp.response_length += sizeof(resp.eth_min_inline);
+    }
+
     /*
      * We don't want to expose information from the PCI bar that is located
      * after 4096 bytes, so if the arch only supports larger pages, let's
      * pretend we don't support reading the HCA's core clock. This is also
      * forced by mmap function.
      */
-    if (PAGE_SIZE <= 4096 &&
-        field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
-        resp.comp_mask |=
-            MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
-        resp.hca_core_clock_offset =
-            offsetof(struct mlx5_init_seg, internal_timer_h) %
-            PAGE_SIZE;
+    if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
+        if (PAGE_SIZE <= 4096) {
+            resp.comp_mask |=
+                MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
+            resp.hca_core_clock_offset =
+                offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
+        }
         resp.response_length += sizeof(resp.hca_core_clock_offset) +
                     sizeof(resp.reserved2);
     }
 
+    if (field_avail(typeof(resp), log_uar_size, udata->outlen))
+        resp.response_length += sizeof(resp.log_uar_size);
+
+    if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
+        resp.response_length += sizeof(resp.num_uars_per_page);
+
     err = ib_copy_to_udata(udata, &resp, resp.response_length);
     if (err)
         goto out_td;
 
-    uuari->ver = ver;
-    uuari->num_low_latency_uuars = req.num_low_latency_uuars;
-    uuari->uars = uars;
-    uuari->num_uars = num_uars;
+    bfregi->ver = ver;
+    bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
     context->cqe_version = resp.cqe_version;
+    context->lib_caps = req.lib_caps;
+    print_lib_caps(dev, context->lib_caps);
 
     return &context->ibucontext;
 
@@ -1168,20 +1250,21 @@ out_td:
     if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
         mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);
 
+out_page:
+    free_page(context->upd_xlt_page);
+
 out_uars:
-    for (i--; i >= 0; i--)
-        mlx5_cmd_free_uar(dev->mdev, uars[i].index);
-out_count:
-    kfree(uuari->count);
-
-out_bitmap:
-    kfree(uuari->bitmap);
-
-out_uar_ctx:
-    kfree(uars);
+    deallocate_uars(dev, context);
+
+out_sys_pages:
+    kfree(bfregi->sys_pages);
+
+out_count:
+    kfree(bfregi->count);
 
 out_ctx:
     kfree(context);
+
     return ERR_PTR(err);
 }
 
@@ -1189,28 +1272,31 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 {
     struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
     struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
-    struct mlx5_uuar_info *uuari = &context->uuari;
-    int i;
+    struct mlx5_bfreg_info *bfregi;
 
+    bfregi = &context->bfregi;
     if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
         mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);
 
-    for (i = 0; i < uuari->num_uars; i++) {
-        if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index))
-            mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
-    }
-
-    kfree(uuari->count);
-    kfree(uuari->bitmap);
-    kfree(uuari->uars);
+    free_page(context->upd_xlt_page);
+    deallocate_uars(dev, context);
+    kfree(bfregi->sys_pages);
+    kfree(bfregi->count);
     kfree(context);
 
     return 0;
 }
 
-static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
+static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
+                 struct mlx5_bfreg_info *bfregi,
+                 int idx)
 {
-    return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index;
+    int fw_uars_per_page;
+
+    fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
+
+    return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) +
+           bfregi->sys_pages[idx] / fw_uars_per_page;
 }
 
 static int get_command(unsigned long offset)
@@ -1365,11 +1451,23 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
             struct vm_area_struct *vma,
             struct mlx5_ib_ucontext *context)
 {
-    struct mlx5_uuar_info *uuari = &context->uuari;
+    struct mlx5_bfreg_info *bfregi = &context->bfregi;
     int err;
     unsigned long idx;
     phys_addr_t pfn, pa;
     pgprot_t prot;
+    int uars_per_page;
+
+    if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+        return -EINVAL;
+
+    uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
+    idx = get_index(vma->vm_pgoff);
+    if (idx % uars_per_page ||
+        idx * uars_per_page >= bfregi->num_sys_pages) {
+        mlx5_ib_warn(dev, "invalid uar index %lu\n", idx);
+        return -EINVAL;
+    }
 
     switch (cmd) {
     case MLX5_IB_MMAP_WC_PAGE:
@@ -1392,14 +1490,7 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
         return -EINVAL;
     }
 
-    if (vma->vm_end - vma->vm_start != PAGE_SIZE)
-        return -EINVAL;
-
-    idx = get_index(vma->vm_pgoff);
-    if (idx >= uuari->num_uars)
-        return -EINVAL;
-
-    pfn = uar_index2pfn(dev, uuari->uars[idx].index);
+    pfn = uar_index2pfn(dev, bfregi, idx);
     mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
 
     vma->vm_page_prot = prot;
@@ -1622,9 +1713,9 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
 
         if (ib_spec->eth.mask.vlan_tag) {
             MLX5_SET(fte_match_set_lyr_2_4, headers_c,
-                 vlan_tag, 1);
+                 cvlan_tag, 1);
             MLX5_SET(fte_match_set_lyr_2_4, headers_v,
-                 vlan_tag, 1);
+                 cvlan_tag, 1);
 
             MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                  first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
@@ -3060,8 +3151,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
     if (mlx5_use_mad_ifc(dev))
         get_ext_port_caps(dev);
 
-    MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
-
     if (!mlx5_lag_is_active(mdev))
         name = "mlx5_%d";
     else
@@ -3237,9 +3326,21 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
     if (err)
         goto err_odp;
 
+    dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
+    if (!dev->mdev->priv.uar)
+        goto err_q_cnt;
+
+    err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
+    if (err)
+        goto err_uar_page;
+
+    err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
+    if (err)
+        goto err_bfreg;
+
     err = ib_register_device(&dev->ib_dev, NULL);
     if (err)
-        goto err_q_cnt;
+        goto err_fp_bfreg;
 
     err = create_umr_res(dev);
     if (err)
@@ -3262,6 +3363,15 @@ err_umrc:
 err_dev:
     ib_unregister_device(&dev->ib_dev);
 
+err_fp_bfreg:
+    mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+
+err_bfreg:
+    mlx5_free_bfreg(dev->mdev, &dev->bfreg);
+
+err_uar_page:
+    mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
+
 err_q_cnt:
     mlx5_ib_dealloc_q_counters(dev);
 
@@ -3293,6 +3403,9 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
 
     mlx5_remove_netdev_notifier(dev);
     ib_unregister_device(&dev->ib_dev);
+    mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+    mlx5_free_bfreg(dev->mdev, &dev->bfreg);
+    mlx5_put_uars_page(dev->mdev, mdev->priv.uar);
     mlx5_ib_dealloc_q_counters(dev);
     destroy_umrc_res(dev);
     mlx5_ib_odp_remove_one(dev);
@ -3307,6 +3420,9 @@ static struct mlx5_interface mlx5_ib_interface = {
|
|||
.add = mlx5_ib_add,
|
||||
.remove = mlx5_ib_remove,
|
||||
.event = mlx5_ib_event,
|
||||
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
||||
.pfault = mlx5_ib_pfault,
|
||||
#endif
|
||||
.protocol = MLX5_INTERFACE_PROTOCOL_IB,
|
||||
};
|
||||
|
||||
|
@ -3317,25 +3433,14 @@ static int __init mlx5_ib_init(void)
|
|||
if (deprecated_prof_sel != 2)
|
||||
pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n");
|
||||
|
||||
err = mlx5_ib_odp_init();
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = mlx5_register_interface(&mlx5_ib_interface);
|
||||
if (err)
|
||||
goto clean_odp;
|
||||
|
||||
return err;
|
||||
|
||||
clean_odp:
|
||||
mlx5_ib_odp_cleanup();
|
||||
return err;
|
||||
}
|
||||
|
||||
static void __exit mlx5_ib_cleanup(void)
|
||||
{
|
||||
mlx5_unregister_interface(&mlx5_ib_interface);
|
||||
mlx5_ib_odp_cleanup();
|
||||
}
|
||||
|
||||
module_init(mlx5_ib_init);
|
||||
|
|
|
@@ -159,7 +159,7 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
	unsigned long umem_page_shift = ilog2(umem->page_size);
	int shift = page_shift - umem_page_shift;
	int mask = (1 << shift) - 1;
	int i, k;
	int i, k, idx;
	u64 cur = 0;
	u64 base;
	int len;
@@ -185,18 +185,36 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> umem_page_shift;
		base = sg_dma_address(sg);
		for (k = 0; k < len; k++) {

		/* Skip elements below offset */
		if (i + len < offset << shift) {
			i += len;
			continue;
		}

		/* Skip pages below offset */
		if (i < offset << shift) {
			k = (offset << shift) - i;
			i = offset << shift;
		} else {
			k = 0;
		}

		for (; k < len; k++) {
			if (!(i & mask)) {
				cur = base + (k << umem_page_shift);
				cur |= access_flags;
				idx = (i >> shift) - offset;

				pas[i >> shift] = cpu_to_be64(cur);
				pas[idx] = cpu_to_be64(cur);
				mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
					    i >> shift, be64_to_cpu(pas[i >> shift]));
			} else
				mlx5_ib_dbg(dev, "=====> 0x%llx\n",
					    base + (k << umem_page_shift));
					    i >> shift, be64_to_cpu(pas[idx]));
			}
			i++;

			/* Stop after num_pages reached */
			if (i >> shift >= offset + num_pages)
				return;
		}
	}
}

@@ -90,7 +90,6 @@ enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
	MLX5_IB_LATENCY_CLASS_FAST_PATH
};

enum mlx5_ib_mad_ifc_flags {
@@ -100,7 +99,7 @@ enum mlx5_ib_mad_ifc_flags {
};

enum {
	MLX5_CROSS_CHANNEL_UUAR = 0,
	MLX5_CROSS_CHANNEL_BFREG = 0,
};

enum {
@@ -120,11 +119,16 @@ struct mlx5_ib_ucontext {
	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_uuar_info	uuari;
	struct mlx5_bfreg_info	bfregi;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;
	struct list_head	vma_private_list;

	unsigned long		upd_xlt_page;
	/* protect ODP/KSM */
	struct mutex		upd_xlt_page_mutex;
	u64			lib_caps;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
@@ -174,13 +178,12 @@ struct mlx5_ib_flow_db {
 * enum ib_send_flags and enum ib_qp_type for low-level driver
 */

#define MLX5_IB_SEND_UMR_UNREG	IB_SEND_RESERVED_START
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2)

#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION	(IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_PD		(IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_ACCESS	IB_SEND_RESERVED_END
#define MLX5_IB_SEND_UMR_ENABLE_MR	       (IB_SEND_RESERVED_START << 0)
#define MLX5_IB_SEND_UMR_DISABLE_MR	       (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE	       (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_SEND_UMR_UPDATE_XLT	       (IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION    (IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS      IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
@@ -190,6 +193,16 @@ struct mlx5_ib_flow_db {
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

#define MLX5_IB_UMR_OCTOWORD	       16
#define MLX5_IB_UMR_XLT_ALIGNMENT      64

#define MLX5_IB_UPD_XLT_ZAP	      BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE	      BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC	      BIT(2)
#define MLX5_IB_UPD_XLT_ADDR	      BIT(3)
#define MLX5_IB_UPD_XLT_PD	      BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS	      BIT(5)

/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
@@ -264,29 +277,6 @@ struct mlx5_ib_rwq_ind_table {
	u32			rqtn;
};

/*
 * Connect-IB can trigger up to four concurrent pagefaults
 * per-QP.
 */
enum mlx5_ib_pagefault_context {
	MLX5_IB_PAGEFAULT_RESPONDER_READ,
	MLX5_IB_PAGEFAULT_REQUESTOR_READ,
	MLX5_IB_PAGEFAULT_RESPONDER_WRITE,
	MLX5_IB_PAGEFAULT_REQUESTOR_WRITE,
	MLX5_IB_PAGEFAULT_CONTEXTS
};

static inline enum mlx5_ib_pagefault_context
	mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault)
{
	return pagefault->flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE);
}

struct mlx5_ib_pfault {
	struct work_struct	work;
	struct mlx5_pagefault	mpfault;
};

struct mlx5_ib_ubuffer {
	struct ib_umem	       *umem;
	int			buf_size;
@@ -334,6 +324,12 @@ struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_rq rq;
};

struct mlx5_bf {
	int			buf_size;
	unsigned long		offset;
	struct mlx5_sq_bfreg   *bfreg;
};

struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
@@ -359,33 +355,19 @@ struct mlx5_ib_qp {
	int			wq_sig;
	int			scat_cqe;
	int			max_inline_data;
	struct mlx5_bf	       *bf;
	struct mlx5_bf		bf;
	int			has_rq;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			uuarn;
	int			bfregn;

	int			create_type;

	/* Store signature errors */
	bool			signature_en;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/*
	 * A flag that is true for QP's that are in a state that doesn't
	 * allow page faults, and shouldn't schedule any more faults.
	 */
	int			disable_page_faults;
	/*
	 * The disable_page_faults_lock protects a QP's disable_page_faults
	 * field, allowing for a thread to atomically check whether the QP
	 * allows page faults, and if so schedule a page fault.
	 */
	spinlock_t		disable_page_faults_lock;
	struct mlx5_ib_pfault	pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
#endif
	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
@@ -414,13 +396,11 @@ enum mlx5_ib_qp_flags {

struct mlx5_umr_wr {
	struct ib_send_wr		wr;
	union {
		u64			virt_addr;
		u64			offset;
	} target;
	u64				virt_addr;
	u64				offset;
	struct ib_pd		       *pd;
	unsigned int			page_shift;
	unsigned int			npages;
	unsigned int			xlt_size;
	u64				length;
	int				access_flags;
	u32				mkey;
@@ -617,7 +597,6 @@ struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		*mdev;
	struct mlx5_roce		roce;
	MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
	int				num_ports;
	/* serialize update of capability mask
	 */
@@ -634,6 +613,7 @@ struct mlx5_ib_dev {
	int				fill_delay;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_odp_caps	odp_caps;
	u64			odp_max_size;
	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
@@ -646,6 +626,8 @@ struct mlx5_ib_dev {
	struct list_head	qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port	*port;
	struct mlx5_sq_bfreg	bfreg;
	struct mlx5_sq_bfreg	fp_bfreg;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -787,8 +769,8 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
		       int npages, int zap);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int access_flags,
			  struct ib_pd *pd, struct ib_udata *udata);
@@ -857,18 +839,13 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
extern struct workqueue_struct *mlx5_ib_page_fault_wq;

void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
			       struct mlx5_ib_pfault *pfault);
void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
		    struct mlx5_pagefault *pfault);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
@@ -877,13 +854,10 @@ static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
	return;
}

static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {}
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {}
static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

@@ -1001,4 +975,17 @@ static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

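/* With the uar_4k device capability and library support indicated, one
 * system page is carved into MLX5_UARS_IN_PAGE 4KB hardware UARs; otherwise
 * each system page backs a single UAR, so the total UAR count below scales
 * with bfregi->num_sys_pages. */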
static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_UARS_IN_PAGE : 1;
}

static inline int get_num_uars(struct mlx5_ib_dev *dev,
			       struct mlx5_bfreg_info *bfregi)
{
	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_sys_pages;
}

#endif /* MLX5_IB_H */

@@ -46,14 +46,9 @@ enum {
};

#define MLX5_UMR_ALIGN 2048
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static __be64 mlx5_ib_update_mtt_emergency_buffer[
		MLX5_UMR_MTT_MIN_CHUNK_SIZE/sizeof(__be64)]
	__aligned(MLX5_UMR_ALIGN);
static DEFINE_MUTEX(mlx5_ib_update_mtt_emergency_buffer_mutex);
#endif

static int clean_mr(struct mlx5_ib_mr *mr);
static int use_umr(struct mlx5_ib_dev *dev, int order);

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
@@ -134,6 +129,7 @@ static void reg_mr_callback(int status, void *context)
		return;
	}

	mr->mmkey.type = MLX5_MKEY_MR;
	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
@@ -629,7 +625,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
		ent->dev = dev;

		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
		    (mlx5_core_is_pf(dev->mdev)))
		    mlx5_core_is_pf(dev->mdev) &&
		    use_umr(dev, ent->order))
			limit = dev->mdev->profile->mr_cache[i].limit;
		else
			limit = 0;
@@ -732,6 +729,7 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
		goto err_in;

	kfree(in);
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;
@@ -757,94 +755,13 @@ static int get_octo_len(u64 addr, u64 len, int page_size)
	return (npages + 1) / 2;
}

static int use_umr(int order)
static int use_umr(struct mlx5_ib_dev *dev, int order)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		return order < MAX_MR_CACHE_ENTRIES + 2;
	return order <= MLX5_MAX_UMR_SHIFT;
}

static int dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int npages, int page_shift, int *size,
			  __be64 **mr_pas, dma_addr_t *dma)
{
	__be64 *pas;
	struct device *ddev = dev->ib_dev.dma_device;

	/*
	 * UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
	 * To avoid copying garbage after the pas array, we allocate
	 * a little more.
	 */
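	/* For example, with a 64-byte MLX5_UMR_MTT_ALIGNMENT, npages = 3
	 * needs 24 bytes of PAS entries but gets a 64-byte buffer; the
	 * padding is zeroed below so the device never sees stale data. */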
	*size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
	*mr_pas = kmalloc(*size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
	if (!(*mr_pas))
		return -ENOMEM;

	pas = PTR_ALIGN(*mr_pas, MLX5_UMR_ALIGN);
	mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
	/* Clear padding after the actual pages. */
	memset(pas + npages, 0, *size - npages * sizeof(u64));

	*dma = dma_map_single(ddev, pas, *size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, *dma)) {
		kfree(*mr_pas);
		return -ENOMEM;
	}

	return 0;
}

static void prep_umr_wqe_common(struct ib_pd *pd, struct ib_send_wr *wr,
				struct ib_sge *sg, u64 dma, int n, u32 key,
				int page_shift)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	sg->addr = dma;
	sg->length = ALIGN(sizeof(u64) * n, 64);
	sg->lkey = dev->umrc.pd->local_dma_lkey;

	wr->next = NULL;
	wr->sg_list = sg;
	if (n)
		wr->num_sge = 1;
	else
		wr->num_sge = 0;

	wr->opcode = MLX5_IB_WR_UMR;

	umrwr->npages = n;
	umrwr->page_shift = page_shift;
	umrwr->mkey = key;
}

static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
			     struct ib_sge *sg, u64 dma, int n, u32 key,
			     int page_shift, u64 virt_addr, u64 len,
			     int access_flags)
{
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	prep_umr_wqe_common(pd, wr, sg, dma, n, key, page_shift);

	wr->send_flags = 0;

	umrwr->target.virt_addr = virt_addr;
	umrwr->length = len;
	umrwr->access_flags = access_flags;
	umrwr->pd = pd;
}

static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
			       struct ib_send_wr *wr, u32 key)
{
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr->opcode = MLX5_IB_WR_UMR;
	umrwr->mkey = key;
}

static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
		       int access_flags, struct ib_umem **umem,
		       int *npages, int *page_shift, int *ncont,
@@ -891,21 +808,39 @@ static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
	init_completion(&context->done);
}

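/* The helper below centralizes the post-and-wait pattern the UMR paths used
 * to open-code: it serializes posts on the UMR QP with a semaphore, sleeps
 * until the completion fires, and maps a bad work-completion status to
 * -EFAULT. */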
static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
				  struct mlx5_umr_wr *umrwr)
{
	struct umr_common *umrc = &dev->umrc;
	struct ib_send_wr *bad;
	int err;
	struct mlx5_ib_umr_context umr_context;

	mlx5_ib_init_umr_context(&umr_context);
	umrwr->wr.wr_cqe = &umr_context.cqe;

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}
	up(&umrc->sem);
	return err;
}

static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct mlx5_umr_wr umrwr = {};
	struct ib_send_wr *bad;
	struct mlx5_ib_mr *mr;
	struct ib_sge sg;
	int size;
	__be64 *mr_pas;
	dma_addr_t dma;
	int err = 0;
	int i;

@@ -924,173 +859,174 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
	if (!mr)
		return ERR_PTR(-EAGAIN);

	err = dma_map_mr_pas(dev, umem, npages, page_shift, &size, &mr_pas,
			     &dma);
	if (err)
		goto free_mr;

	mlx5_ib_init_umr_context(&umr_context);

	umrwr.wr.wr_cqe = &umr_context.cqe;
	prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
			 page_shift, virt_addr, len, access_flags);

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
		goto unmap_dma;
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed\n");
			err = -EFAULT;
		}
	}

	mr->ibmr.pd = pd;
	mr->umem = umem;
	mr->access_flags = access_flags;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	mr->live = 1;
	err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
				 MLX5_IB_UPD_XLT_ENABLE);

unmap_dma:
	up(&umrc->sem);
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

	kfree(mr_pas);

free_mr:
	if (err) {
		free_cached_mr(dev, mr);
		return ERR_PTR(err);
	}

	mr->live = 1;

	return mr;
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
		       int zap)
static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
			       void *xlt, int page_shift, size_t size,
			       int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct ib_umem *umem = mr->umem;

	npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);

	if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
		__mlx5_ib_populate_pas(dev, umem, page_shift,
				       idx, npages, xlt,
				       MLX5_IB_MTT_PRESENT);
		/* Clear padding after the pages
		 * brought from the umem.
		 */
		memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
		       size - npages * sizeof(struct mlx5_mtt));
	}

	return npages;
}

#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
			    MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000

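/* The update loop below fills a single DMA buffer one chunk of XLT entries
 * at a time and posts one UMR per chunk; MLX5_SPARE_UMR_CHUNK is the smaller
 * fallback allocation tried before resorting to the per-context emergency
 * page. */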
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct ib_umem *umem = mr->umem;
	struct mlx5_ib_ucontext *uctx = NULL;
	int size;
	__be64 *pas;
	void *xlt;
	dma_addr_t dma;
	struct ib_send_wr *bad;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
	const int page_index_mask = page_index_alignment - 1;
	int desc_size = sizeof(struct mlx5_mtt);
	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
	const int page_mask = page_align - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	int use_emergency_buf = 0;
	gfp_t gfp;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly */
	if (start_page_index & page_index_mask) {
		npages += start_page_index & page_index_mask;
		start_page_index &= ~page_index_mask;
	 * so we need to align the offset and length accordingly
	 */
	if (idx & page_mask) {
		npages += idx & page_mask;
		idx &= ~page_mask;
	}

	pages_to_map = ALIGN(npages, page_index_alignment);
	gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
	gfp |= __GFP_ZERO | __GFP_NOWARN;

	if (start_page_index + pages_to_map > MLX5_MAX_UMR_PAGES)
		return -EINVAL;
	pages_to_map = ALIGN(npages, page_align);
	size = desc_size * pages_to_map;
	size = min_t(int, size, MLX5_MAX_UMR_CHUNK);

	size = sizeof(u64) * pages_to_map;
	size = min_t(int, PAGE_SIZE, size);
	/* We allocate with GFP_ATOMIC to avoid recursion into page-reclaim
	 * code, when we are called from an invalidation. The pas buffer must
	 * be 2k-aligned for Connect-IB. */
	pas = (__be64 *)get_zeroed_page(GFP_ATOMIC);
	if (!pas) {
		mlx5_ib_warn(dev, "unable to allocate memory during MTT update, falling back to slower chunked mechanism.\n");
		pas = mlx5_ib_update_mtt_emergency_buffer;
		size = MLX5_UMR_MTT_MIN_CHUNK_SIZE;
		use_emergency_buf = 1;
		mutex_lock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
		memset(pas, 0, size);
	xlt = (void *)__get_free_pages(gfp, get_order(size));
	if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
		mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation of %d bytes\n",
			    size, get_order(size), MLX5_SPARE_UMR_CHUNK);

		size = MLX5_SPARE_UMR_CHUNK;
		xlt = (void *)__get_free_pages(gfp, get_order(size));
	}
	pages_iter = size / sizeof(u64);
	dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);

	if (!xlt) {
		uctx = to_mucontext(mr->ibmr.uobject->context);
		mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
		size = PAGE_SIZE;
		xlt = (void *)uctx->upd_xlt_page;
		mutex_lock(&uctx->upd_xlt_page_mutex);
		memset(xlt, 0, size);
	}
	pages_iter = size / desc_size;
	dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during MTT update.\n");
		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
		err = -ENOMEM;
		goto free_pas;
		goto free_xlt;
	}

	sg.addr = dma;
	sg.lkey = dev->umrc.pd->local_dma_lkey;

	memset(&wr, 0, sizeof(wr));
	wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
	if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
		wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr.wr.sg_list = &sg;
	wr.wr.num_sge = 1;
	wr.wr.opcode = MLX5_IB_WR_UMR;

	wr.pd = mr->ibmr.pd;
	wr.mkey = mr->mmkey.key;
	wr.length = mr->mmkey.size;
	wr.virt_addr = mr->mmkey.iova;
	wr.access_flags = mr->access_flags;
	wr.page_shift = page_shift;

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, start_page_index += pages_iter) {
	     pages_mapped += pages_iter, idx += pages_iter) {
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);

		npages = min_t(size_t,
			       pages_iter,
			       ib_umem_num_pages(umem) - start_page_index);

		if (!zap) {
			__mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT,
					       start_page_index, npages, pas,
					       MLX5_IB_MTT_PRESENT);
			/* Clear padding after the pages brought from the
			 * umem. */
			memset(pas + npages, 0, size - npages * sizeof(u64));
		}
		npages = populate_xlt(mr, idx, pages_iter, xlt,
				      page_shift, size, flags);

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		mlx5_ib_init_umr_context(&umr_context);
		sg.length = ALIGN(npages * desc_size,
				  MLX5_UMR_MTT_ALIGNMENT);

		memset(&wr, 0, sizeof(wr));
		wr.wr.wr_cqe = &umr_context.cqe;

		sg.addr = dma;
		sg.length = ALIGN(npages * sizeof(u64),
				  MLX5_UMR_MTT_ALIGNMENT);
		sg.lkey = dev->umrc.pd->local_dma_lkey;

		wr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
				   MLX5_IB_SEND_UMR_UPDATE_MTT;
		wr.wr.sg_list = &sg;
		wr.wr.num_sge = 1;
		wr.wr.opcode = MLX5_IB_WR_UMR;
		wr.npages = sg.length / sizeof(u64);
		wr.page_shift = PAGE_SHIFT;
		wr.mkey = mr->mmkey.key;
		wr.target.offset = start_page_index;

		down(&umrc->sem);
		err = ib_post_send(umrc->qp, &wr.wr, &bad);
		if (err) {
			mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);
		} else {
			wait_for_completion(&umr_context.done);
			if (umr_context.status != IB_WC_SUCCESS) {
				mlx5_ib_err(dev, "UMR completion failed, code %d\n",
					    umr_context.status);
				err = -EFAULT;
			}
		if (pages_mapped + pages_iter >= pages_to_map) {
			if (flags & MLX5_IB_UPD_XLT_ENABLE)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_ENABLE_MR |
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
			if (flags & MLX5_IB_UPD_XLT_PD ||
			    flags & MLX5_IB_UPD_XLT_ACCESS)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
			if (flags & MLX5_IB_UPD_XLT_ADDR)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
		}
		up(&umrc->sem);

		wr.offset = idx * desc_size;
		wr.xlt_size = sg.length;

		err = mlx5_ib_post_send_wait(dev, &wr);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_pas:
	if (!use_emergency_buf)
		free_page((unsigned long)pas);
free_xlt:
	if (uctx)
		mutex_unlock(&uctx->upd_xlt_page_mutex);
	else
		mutex_unlock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
	free_pages((unsigned long)xlt, get_order(size));

	return err;
}
#endif

/*
 * If ibmr is NULL it will be allocated by reg_create.
@@ -1122,8 +1058,9 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	mlx5_ib_populate_pas(dev, umem, page_shift, pas,
			     pg_cap ? MLX5_IB_MTT_PRESENT : 0);
	if (!(access_flags & IB_ACCESS_ON_DEMAND))
		mlx5_ib_populate_pas(dev, umem, page_shift, pas,
				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command. */
@@ -1153,6 +1090,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->umem = umem;
	mr->dev = dev;
	mr->live = 1;
@@ -1204,14 +1142,15 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
	if (err < 0)
		return ERR_PTR(err);

	if (use_umr(order)) {
	if (use_umr(dev, order)) {
		mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
			     order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d", order);
			mr = NULL;
		}
	} else if (access_flags & IB_ACCESS_ON_DEMAND) {
	} else if (access_flags & IB_ACCESS_ON_DEMAND &&
		   !MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
		err = -EINVAL;
		pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
		goto error;
@@ -1248,106 +1187,39 @@ error:
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct mlx5_umr_wr umrwr = {};
	struct ib_send_wr *bad;
	int err;

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	mlx5_ib_init_umr_context(&umr_context);
	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
			      MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	umrwr.wr.wr_cqe = &umr_context.cqe;
	prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmkey.key);

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
	if (err) {
		up(&umrc->sem);
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto error;
	} else {
		wait_for_completion(&umr_context.done);
		up(&umrc->sem);
	}
	if (umr_context.status != IB_WC_SUCCESS) {
		mlx5_ib_warn(dev, "unreg umr failed\n");
		err = -EFAULT;
		goto error;
	}
	return 0;

error:
	return err;
	return mlx5_ib_post_send_wait(dev, &umrwr);
}

static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
		     u64 length, int npages, int page_shift, int order,
static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
		     int access_flags, int flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct mlx5_ib_umr_context umr_context;
	struct ib_send_wr *bad;
	struct mlx5_umr_wr umrwr = {};
	struct ib_sge sg;
	struct umr_common *umrc = &dev->umrc;
	dma_addr_t dma = 0;
	__be64 *mr_pas = NULL;
	int size;
	int err;

	mlx5_ib_init_umr_context(&umr_context);

	umrwr.wr.wr_cqe = &umr_context.cqe;
	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

	if (flags & IB_MR_REREG_TRANS) {
		err = dma_map_mr_pas(dev, mr->umem, npages, page_shift, &size,
				     &mr_pas, &dma);
		if (err)
			return err;
	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

		umrwr.target.virt_addr = virt_addr;
		umrwr.length = length;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
	}

	prep_umr_wqe_common(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
			    page_shift);

	if (flags & IB_MR_REREG_PD) {
	if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
		umrwr.pd = pd;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD;
	}

	if (flags & IB_MR_REREG_ACCESS) {
		umrwr.access_flags = access_flags;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_ACCESS;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
	}

	/* post send request to UMR QP */
	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
	err = mlx5_ib_post_send_wait(dev, &umrwr);

	if (err) {
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}

	up(&umrc->sem);
	if (flags & IB_MR_REREG_TRANS) {
		dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
		kfree(mr_pas);
	}
	return err;
}

@@ -1364,6 +1236,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
	u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
	u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
	int page_shift = 0;
	int upd_flags = 0;
	int npages = 0;
	int ncont = 0;
	int order = 0;
@@ -1372,6 +1245,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);

	if (flags != IB_MR_REREG_PD) {
		/*
		 * Replace umem. This needs to be done whether or not UMR is
@@ -1382,7 +1257,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
		err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
				  &npages, &page_shift, &ncont, &order);
		if (err < 0) {
			mr->umem = NULL;
			clean_mr(mr);
			return err;
		}
	}
@@ -1414,32 +1289,37 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
		/*
		 * Send a UMR WQE
		 */
		err = rereg_umr(pd, mr, addr, len, npages, page_shift,
				order, access_flags, flags);
		mr->ibmr.pd = pd;
		mr->access_flags = access_flags;
		mr->mmkey.iova = addr;
		mr->mmkey.size = len;
		mr->mmkey.pd = to_mpd(pd)->pdn;

		if (flags & IB_MR_REREG_TRANS) {
			upd_flags = MLX5_IB_UPD_XLT_ADDR;
			if (flags & IB_MR_REREG_PD)
				upd_flags |= MLX5_IB_UPD_XLT_PD;
			if (flags & IB_MR_REREG_ACCESS)
				upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
			err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
						 upd_flags);
		} else {
			err = rereg_umr(pd, mr, access_flags, flags);
		}

		if (err) {
			mlx5_ib_warn(dev, "Failed to rereg UMR\n");
			ib_umem_release(mr->umem);
			clean_mr(mr);
			return err;
		}
	}

	if (flags & IB_MR_REREG_PD) {
		ib_mr->pd = pd;
		mr->mmkey.pd = to_mpd(pd)->pdn;
	}
	set_mr_fileds(dev, mr, npages, len, access_flags);

	if (flags & IB_MR_REREG_ACCESS)
		mr->access_flags = access_flags;

	if (flags & IB_MR_REREG_TRANS) {
		atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
		set_mr_fileds(dev, mr, npages, len, access_flags);
		mr->mmkey.iova = addr;
		mr->mmkey.size = len;
	}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	return 0;
}

@@ -1603,11 +1483,11 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
		mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(u64));
					    ndescs, sizeof(struct mlx5_mtt));
		if (err)
			goto err_free_in;

		mr->desc_size = sizeof(u64);
		mr->desc_size = sizeof(struct mlx5_mtt);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SG_GAPS) {
		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
@@ -1656,6 +1536,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
	if (err)
		goto err_destroy_psv;

	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;
@@ -1736,6 +1617,7 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
	if (err)
		goto free;

	mw->mmkey.type = MLX5_MKEY_MW;
	mw->ibmw.rkey = mw->mmkey.key;

	resp.response_length = min(offsetof(typeof(resp), response_length) +

@@ -41,13 +41,12 @@
 * a pagefault. */
#define MMU_NOTIFIER_TIMEOUT 1000

struct workqueue_struct *mlx5_ib_page_fault_wq;

void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end)
{
	struct mlx5_ib_mr *mr;
	const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT / sizeof(u64)) - 1;
	const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
				    sizeof(struct mlx5_mtt)) - 1;
	u64 idx = 0, blk_start_idx = 0;
	int in_block = 0;
	u64 addr;
@@ -90,16 +89,21 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			u64 umr_offset = idx & umr_block_mask;

			if (in_block && umr_offset == 0) {
				mlx5_ib_update_mtt(mr, blk_start_idx,
						   idx - blk_start_idx, 1);
				mlx5_ib_update_xlt(mr, blk_start_idx,
						   idx - blk_start_idx,
						   PAGE_SHIFT,
						   MLX5_IB_UPD_XLT_ZAP |
						   MLX5_IB_UPD_XLT_ATOMIC);
				in_block = 0;
			}
		}
	}
	if (in_block)
		mlx5_ib_update_mtt(mr, blk_start_idx, idx - blk_start_idx + 1,
				   1);

		mlx5_ib_update_xlt(mr, blk_start_idx,
				   idx - blk_start_idx + 1,
				   PAGE_SHIFT,
				   MLX5_IB_UPD_XLT_ZAP |
				   MLX5_IB_UPD_XLT_ATOMIC);
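	/* ZAP writes zeroed XLT entries so the device faults on its next
	 * access to this range; ATOMIC tells the update path it is running
	 * from the MMU-notifier context and must not sleep in allocations. */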
	/*
	 * We are now sure that the device will not access the
	 * memory. We can safely unmap it, and mark it as dirty if
@@ -120,6 +124,11 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)

	caps->general_caps = IB_ODP_SUPPORT;

	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		dev->odp_max_size = U64_MAX;
	else
		dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);

	if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
		caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;

@@ -135,6 +144,9 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;

	return;
}

@@ -143,46 +155,51 @@ static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev,
{
	u32 base_key = mlx5_base_mkey(key);
	struct mlx5_core_mkey *mmkey = __mlx5_mr_lookup(dev->mdev, base_key);
	struct mlx5_ib_mr *mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
	struct mlx5_ib_mr *mr;

	if (!mmkey || mmkey->key != key || !mr->live)
	if (!mmkey || mmkey->key != key || mmkey->type != MLX5_MKEY_MR)
		return NULL;

	mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);

	if (!mr->live)
		return NULL;

	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static void mlx5_ib_page_fault_resume(struct mlx5_ib_qp *qp,
				      struct mlx5_ib_pfault *pfault,
static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
				      struct mlx5_pagefault *pfault,
				      int error)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
	u32 qpn = qp->trans_qp.base.mqp.qpn;
	int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
		     pfault->wqe.wq_num : pfault->token;
	int ret = mlx5_core_page_fault_resume(dev->mdev,
					      qpn,
					      pfault->mpfault.flags,
					      pfault->token,
					      wq_num,
					      pfault->type,
					      error);
	if (ret)
		pr_err("Failed to resolve the page fault on QP 0x%x\n", qpn);
		mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x\n",
			    wq_num);
}

/*
 * Handle a single data segment in a page-fault WQE.
 * Handle a single data segment in a page-fault WQE or RDMA region.
 *
 * Returns number of pages retrieved on success. The caller will continue to
 * Returns number of pages retrieved on success. The caller may continue to
 * the next data segment.
 * Can return the following error codes:
 * -EAGAIN to designate a temporary error. The caller will abort handling the
 *  page fault and resolve it.
 * -EFAULT when there's an error mapping the requested pages. The caller will
 *  abort the page fault handling and possibly move the QP to an error state.
 * On other errors the QP should also be closed with an error.
 *  abort the page fault handling.
 */
static int pagefault_single_data_segment(struct mlx5_ib_qp *qp,
					 struct mlx5_ib_pfault *pfault,
static int pagefault_single_data_segment(struct mlx5_ib_dev *mib_dev,
					 u32 key, u64 io_virt, size_t bcnt,
					 u32 *bytes_committed,
					 u32 *bytes_mapped)
{
	struct mlx5_ib_dev *mib_dev = to_mdev(qp->ibqp.pd->device);
	int srcu_key;
	unsigned int current_seq;
	u64 start_idx;
@@ -208,12 +225,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_qp *qp,
			   key);
		if (bytes_mapped)
			*bytes_mapped +=
				(bcnt - pfault->mpfault.bytes_committed);
		goto srcu_unlock;
	}
	if (mr->ibmr.pd != qp->ibqp.pd) {
		pr_err("Page-fault with different PDs for QP and MR.\n");
		ret = -EFAULT;
				(bcnt - *bytes_committed);
		goto srcu_unlock;
	}

@@ -229,8 +241,8 @@ static int pagefault_single_data_segment(struct mlx5_ib_qp *qp,
	 * in all iterations (in iteration 2 and above,
	 * bytes_committed == 0).
	 */
	io_virt += pfault->mpfault.bytes_committed;
	bcnt -= pfault->mpfault.bytes_committed;
	io_virt += *bytes_committed;
	bcnt -= *bytes_committed;

	start_idx = (io_virt - (mr->mmkey.iova & PAGE_MASK)) >> PAGE_SHIFT;

@@ -251,7 +263,9 @@ static int pagefault_single_data_segment(struct mlx5_ib_qp *qp,
		 * this MR, since ib_umem_odp_map_dma_pages already
		 * checks this.
		 */
		ret = mlx5_ib_update_mtt(mr, start_idx, npages, 0);
		ret = mlx5_ib_update_xlt(mr, start_idx, npages,
					 PAGE_SHIFT,
					 MLX5_IB_UPD_XLT_ATOMIC);
	} else {
		ret = -EAGAIN;
	}
@@ -287,7 +301,7 @@ srcu_unlock:
		}
	}
	srcu_read_unlock(&mib_dev->mr_srcu, srcu_key);
	pfault->mpfault.bytes_committed = 0;
	*bytes_committed = 0;
	return ret ? ret : npages;
}

@@ -309,8 +323,9 @@ srcu_unlock:
 * Returns the number of pages loaded if positive, zero for an empty WQE, or a
 * negative error code.
 */
static int pagefault_data_segments(struct mlx5_ib_qp *qp,
				   struct mlx5_ib_pfault *pfault, void *wqe,
static int pagefault_data_segments(struct mlx5_ib_dev *dev,
				   struct mlx5_pagefault *pfault,
				   struct mlx5_ib_qp *qp, void *wqe,
				   void *wqe_end, u32 *bytes_mapped,
				   u32 *total_wqe_bytes, int receive_queue)
{
@@ -354,22 +369,23 @@ static int pagefault_data_segments(struct mlx5_ib_qp *qp,

		if (!inline_segment && total_wqe_bytes) {
			*total_wqe_bytes += bcnt - min_t(size_t, bcnt,
					pfault->mpfault.bytes_committed);
					pfault->bytes_committed);
		}

		/* A zero length data segment designates a length of 2GB. */
		if (bcnt == 0)
			bcnt = 1U << 31;

		if (inline_segment || bcnt <= pfault->mpfault.bytes_committed) {
			pfault->mpfault.bytes_committed -=
		if (inline_segment || bcnt <= pfault->bytes_committed) {
			pfault->bytes_committed -=
				min_t(size_t, bcnt,
				      pfault->mpfault.bytes_committed);
				      pfault->bytes_committed);
			continue;
		}

		ret = pagefault_single_data_segment(qp, pfault, key, io_virt,
						    bcnt, bytes_mapped);
		ret = pagefault_single_data_segment(dev, key, io_virt, bcnt,
						    &pfault->bytes_committed,
						    bytes_mapped);
		if (ret < 0)
			break;
		npages += ret;
@@ -378,17 +394,29 @@ static int pagefault_data_segments(struct mlx5_ib_qp *qp,
	return ret < 0 ? ret : npages;
}

static const u32 mlx5_ib_odp_opcode_cap[] = {
	[MLX5_OPCODE_SEND]	       = IB_ODP_SUPPORT_SEND,
	[MLX5_OPCODE_SEND_IMM]	       = IB_ODP_SUPPORT_SEND,
	[MLX5_OPCODE_SEND_INVAL]       = IB_ODP_SUPPORT_SEND,
	[MLX5_OPCODE_RDMA_WRITE]       = IB_ODP_SUPPORT_WRITE,
	[MLX5_OPCODE_RDMA_WRITE_IMM]   = IB_ODP_SUPPORT_WRITE,
	[MLX5_OPCODE_RDMA_READ]	       = IB_ODP_SUPPORT_READ,
	[MLX5_OPCODE_ATOMIC_CS]	       = IB_ODP_SUPPORT_ATOMIC,
	[MLX5_OPCODE_ATOMIC_FA]	       = IB_ODP_SUPPORT_ATOMIC,
};

||||
/*
|
||||
* Parse initiator WQE. Advances the wqe pointer to point at the
|
||||
* scatter-gather list, and set wqe_end to the end of the WQE.
|
||||
*/
|
||||
static int mlx5_ib_mr_initiator_pfault_handler(
|
||||
struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault,
|
||||
void **wqe, void **wqe_end, int wqe_length)
|
||||
struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
|
||||
struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
|
||||
{
|
||||
struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
|
||||
struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
|
||||
u16 wqe_index = pfault->mpfault.wqe.wqe_index;
|
||||
u16 wqe_index = pfault->wqe.wqe_index;
|
||||
u32 transport_caps;
|
||||
struct mlx5_base_av *av;
|
||||
unsigned ds, opcode;
|
||||
#if defined(DEBUG)
|
||||
u32 ctrl_wqe_index, ctrl_qpn;
|
||||
|
@ -434,53 +462,49 @@ static int mlx5_ib_mr_initiator_pfault_handler(
|
|||
|
||||
opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
|
||||
MLX5_WQE_CTRL_OPCODE_MASK;
|
||||
|
||||
switch (qp->ibqp.qp_type) {
|
||||
case IB_QPT_RC:
|
||||
switch (opcode) {
|
||||
case MLX5_OPCODE_SEND:
|
||||
case MLX5_OPCODE_SEND_IMM:
|
||||
case MLX5_OPCODE_SEND_INVAL:
|
||||
if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
|
||||
IB_ODP_SUPPORT_SEND))
|
||||
goto invalid_transport_or_opcode;
|
||||
break;
|
||||
case MLX5_OPCODE_RDMA_WRITE:
|
||||
case MLX5_OPCODE_RDMA_WRITE_IMM:
|
||||
if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
|
||||
IB_ODP_SUPPORT_WRITE))
|
||||
goto invalid_transport_or_opcode;
|
||||
*wqe += sizeof(struct mlx5_wqe_raddr_seg);
|
||||
break;
|
||||
case MLX5_OPCODE_RDMA_READ:
|
||||
if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
|
||||
IB_ODP_SUPPORT_READ))
|
||||
goto invalid_transport_or_opcode;
|
||||
*wqe += sizeof(struct mlx5_wqe_raddr_seg);
|
||||
break;
|
||||
default:
|
||||
goto invalid_transport_or_opcode;
|
||||
}
|
||||
transport_caps = dev->odp_caps.per_transport_caps.rc_odp_caps;
|
||||
break;
|
||||
case IB_QPT_UD:
|
||||
switch (opcode) {
|
||||
case MLX5_OPCODE_SEND:
|
||||
case MLX5_OPCODE_SEND_IMM:
|
||||
if (!(dev->odp_caps.per_transport_caps.ud_odp_caps &
|
||||
IB_ODP_SUPPORT_SEND))
|
||||
goto invalid_transport_or_opcode;
|
||||
*wqe += sizeof(struct mlx5_wqe_datagram_seg);
|
||||
break;
|
||||
default:
|
||||
goto invalid_transport_or_opcode;
|
||||
}
|
||||
transport_caps = dev->odp_caps.per_transport_caps.ud_odp_caps;
|
||||
break;
|
||||
default:
|
||||
invalid_transport_or_opcode:
|
||||
mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode or transport. transport: 0x%x opcode: 0x%x.\n",
|
||||
qp->ibqp.qp_type, opcode);
|
||||
mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport 0x%x\n",
|
||||
qp->ibqp.qp_type);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (unlikely(opcode >= sizeof(mlx5_ib_odp_opcode_cap) /
|
||||
sizeof(mlx5_ib_odp_opcode_cap[0]) ||
|
||||
!(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) {
|
||||
mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode 0x%x\n",
|
||||
opcode);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (qp->ibqp.qp_type != IB_QPT_RC) {
|
||||
av = *wqe;
|
||||
if (av->dqp_dct & be32_to_cpu(MLX5_WQE_AV_EXT))
|
||||
*wqe += sizeof(struct mlx5_av);
|
||||
else
|
||||
*wqe += sizeof(struct mlx5_base_av);
|
||||
}
|
||||
|
||||
switch (opcode) {
|
||||
case MLX5_OPCODE_RDMA_WRITE:
|
||||
case MLX5_OPCODE_RDMA_WRITE_IMM:
|
||||
case MLX5_OPCODE_RDMA_READ:
|
||||
*wqe += sizeof(struct mlx5_wqe_raddr_seg);
|
||||
break;
|
||||
case MLX5_OPCODE_ATOMIC_CS:
|
||||
case MLX5_OPCODE_ATOMIC_FA:
|
||||
*wqe += sizeof(struct mlx5_wqe_raddr_seg);
|
||||
*wqe += sizeof(struct mlx5_wqe_atomic_seg);
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -489,10 +513,9 @@ invalid_transport_or_opcode:
|
|||
* scatter-gather list, and set wqe_end to the end of the WQE.
|
||||
*/
|
||||
static int mlx5_ib_mr_responder_pfault_handler(
|
||||
struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault,
|
||||
void **wqe, void **wqe_end, int wqe_length)
|
||||
struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
|
||||
struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
|
||||
{
|
||||
struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
|
||||
struct mlx5_ib_wq *wq = &qp->rq;
|
||||
int wqe_size = 1 << wq->wqe_shift;
|
||||
|
||||
|
@ -529,70 +552,83 @@ invalid_transport_or_opcode:
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_qp *qp,
|
||||
struct mlx5_ib_pfault *pfault)
|
||||
static struct mlx5_ib_qp *mlx5_ib_odp_find_qp(struct mlx5_ib_dev *dev,
|
||||
u32 wq_num)
|
||||
{
|
||||
struct mlx5_core_qp *mqp = __mlx5_qp_lookup(dev->mdev, wq_num);
|
||||
|
||||
if (!mqp) {
|
||||
mlx5_ib_err(dev, "QPN 0x%6x not found\n", wq_num);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return to_mibqp(mqp);
|
||||
}
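/* Page faults now arrive per-device rather than per-QP, so the WQ number
 * reported by the hardware has to be resolved back to a QP through the mlx5
 * core QP table before the faulting WQE can be read and parsed. */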

static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
					  struct mlx5_pagefault *pfault)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
	int ret;
	void *wqe, *wqe_end;
	u32 bytes_mapped, total_wqe_bytes;
	char *buffer = NULL;
	int resume_with_error = 0;
	u16 wqe_index = pfault->mpfault.wqe.wqe_index;
	int requestor = pfault->mpfault.flags & MLX5_PFAULT_REQUESTOR;
	u32 qpn = qp->trans_qp.base.mqp.qpn;
	int resume_with_error = 1;
	u16 wqe_index = pfault->wqe.wqe_index;
	int requestor = pfault->type & MLX5_PFAULT_REQUESTOR;
	struct mlx5_ib_qp *qp;

	buffer = (char *)__get_free_page(GFP_KERNEL);
	if (!buffer) {
		mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
		resume_with_error = 1;
		goto resolve_page_fault;
	}

	qp = mlx5_ib_odp_find_qp(dev, pfault->wqe.wq_num);
	if (!qp)
		goto resolve_page_fault;

	ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer,
				    PAGE_SIZE, &qp->trans_qp.base);
	if (ret < 0) {
		mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%x, wqe_index=%x, qpn=%x\n",
			    -ret, wqe_index, qpn);
		resume_with_error = 1;
		mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%d, wqe_index=%x, qpn=%x\n",
			    ret, wqe_index, pfault->token);
		goto resolve_page_fault;
	}

	wqe = buffer;
	if (requestor)
		ret = mlx5_ib_mr_initiator_pfault_handler(qp, pfault, &wqe,
		ret = mlx5_ib_mr_initiator_pfault_handler(dev, pfault, qp, &wqe,
							  &wqe_end, ret);
	else
		ret = mlx5_ib_mr_responder_pfault_handler(qp, pfault, &wqe,
		ret = mlx5_ib_mr_responder_pfault_handler(dev, pfault, qp, &wqe,
							  &wqe_end, ret);
	if (ret < 0) {
		resume_with_error = 1;
	if (ret < 0)
		goto resolve_page_fault;
	}

	if (wqe >= wqe_end) {
		mlx5_ib_err(dev, "ODP fault on invalid WQE.\n");
		resume_with_error = 1;
		goto resolve_page_fault;
	}

	ret = pagefault_data_segments(qp, pfault, wqe, wqe_end, &bytes_mapped,
				      &total_wqe_bytes, !requestor);
	ret = pagefault_data_segments(dev, pfault, qp, wqe, wqe_end,
				      &bytes_mapped, &total_wqe_bytes,
				      !requestor);
	if (ret == -EAGAIN) {
		resume_with_error = 0;
		goto resolve_page_fault;
	} else if (ret < 0 || total_wqe_bytes > bytes_mapped) {
		mlx5_ib_err(dev, "Error getting user pages for page fault. Error: 0x%x\n",
			    -ret);
		resume_with_error = 1;
		if (ret != -ENOENT)
			mlx5_ib_err(dev, "Error getting user pages for page fault. Error: %d\n",
				    ret);
		goto resolve_page_fault;
	}

	resume_with_error = 0;
resolve_page_fault:
	mlx5_ib_page_fault_resume(qp, pfault, resume_with_error);
	mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, flags: 0x%x\n",
		    qpn, resume_with_error,
		    pfault->mpfault.flags);

	mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
	mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
		    pfault->token, resume_with_error,
		    pfault->type);
	free_page((unsigned long)buffer);
}

@@ -602,15 +638,14 @@ static int pages_in_range(u64 address, u32 length)
		(address & PAGE_MASK)) >> PAGE_SHIFT;
}

static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp,
					   struct mlx5_ib_pfault *pfault)
static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
					   struct mlx5_pagefault *pfault)
{
	struct mlx5_pagefault *mpfault = &pfault->mpfault;
	u64 address;
	u32 length;
	u32 prefetch_len = mpfault->bytes_committed;
	u32 prefetch_len = pfault->bytes_committed;
	int prefetch_activated = 0;
	u32 rkey = mpfault->rdma.r_key;
	u32 rkey = pfault->rdma.r_key;
	int ret;

	/* The RDMA responder handler handles the page fault in two parts.
@@ -619,38 +654,40 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp,
	 * prefetches more pages. The second operation cannot use the pfault
	 * context and therefore uses the dummy_pfault context allocated on
	 * the stack */
	struct mlx5_ib_pfault dummy_pfault = {};
	pfault->rdma.rdma_va += pfault->bytes_committed;
	pfault->rdma.rdma_op_len -= min(pfault->bytes_committed,
					pfault->rdma.rdma_op_len);
	pfault->bytes_committed = 0;

	dummy_pfault.mpfault.bytes_committed = 0;

	mpfault->rdma.rdma_va += mpfault->bytes_committed;
	mpfault->rdma.rdma_op_len -= min(mpfault->bytes_committed,
					 mpfault->rdma.rdma_op_len);
	mpfault->bytes_committed = 0;

	address = mpfault->rdma.rdma_va;
	length = mpfault->rdma.rdma_op_len;
	address = pfault->rdma.rdma_va;
	length = pfault->rdma.rdma_op_len;

	/* For some operations, the hardware cannot tell the exact message
	 * length, and in those cases it reports zero. Use prefetch
	 * logic. */
	if (length == 0) {
		prefetch_activated = 1;
		length = mpfault->rdma.packet_size;
		length = pfault->rdma.packet_size;
		prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
	}

	ret = pagefault_single_data_segment(qp, pfault, rkey, address, length,
					    NULL);
	ret = pagefault_single_data_segment(dev, rkey, address, length,
					    &pfault->bytes_committed, NULL);
	if (ret == -EAGAIN) {
		/* We're racing with an invalidation, don't prefetch */
		prefetch_activated = 0;
	} else if (ret < 0 || pages_in_range(address, length) > ret) {
		mlx5_ib_page_fault_resume(qp, pfault, 1);
		mlx5_ib_page_fault_resume(dev, pfault, 1);
		if (ret != -ENOENT)
			mlx5_ib_warn(dev, "PAGE FAULT error %d. QP 0x%x, type: 0x%x\n",
				     ret, pfault->token, pfault->type);
		return;
	}

	mlx5_ib_page_fault_resume(qp, pfault, 0);
	mlx5_ib_page_fault_resume(dev, pfault, 0);
	mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x, type: 0x%x, prefetch_activated: %d\n",
		    pfault->token, pfault->type,
		    prefetch_activated);

	/* At this point, there might be a new pagefault already arriving in
	 * the eq, switch to the dummy pagefault for the rest of the
@@ -658,112 +695,39 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp,
	 * work-queue is being fenced. */

	if (prefetch_activated) {
		ret = pagefault_single_data_segment(qp, &dummy_pfault, rkey,
						    address,
		u32 bytes_committed = 0;

		ret = pagefault_single_data_segment(dev, rkey, address,
						    prefetch_len,
						    NULL);
						    &bytes_committed, NULL);
		if (ret < 0) {
			pr_warn("Prefetch failed (ret = %d, prefetch_activated = %d) for QPN %d, address: 0x%.16llx, length = 0x%.16x\n",
				ret, prefetch_activated,
				qp->ibqp.qp_num, address, prefetch_len);
			mlx5_ib_warn(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
				     ret, pfault->token, address,
				     prefetch_len);
		}
	}
}

void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
			       struct mlx5_ib_pfault *pfault)
void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
		    struct mlx5_pagefault *pfault)
{
	u8 event_subtype = pfault->mpfault.event_subtype;
	struct mlx5_ib_dev *dev = context;
	u8 event_subtype = pfault->event_subtype;

	switch (event_subtype) {
	case MLX5_PFAULT_SUBTYPE_WQE:
		mlx5_ib_mr_wqe_pfault_handler(qp, pfault);
		mlx5_ib_mr_wqe_pfault_handler(dev, pfault);
		break;
	case MLX5_PFAULT_SUBTYPE_RDMA:
		mlx5_ib_mr_rdma_pfault_handler(qp, pfault);
		mlx5_ib_mr_rdma_pfault_handler(dev, pfault);
		break;
	default:
		pr_warn("Invalid page fault event subtype: 0x%x\n",
			event_subtype);
		mlx5_ib_page_fault_resume(qp, pfault, 1);
		break;
		mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n",
			    event_subtype);
		mlx5_ib_page_fault_resume(dev, pfault, 1);
	}
}

static void mlx5_ib_qp_pfault_action(struct work_struct *work)
{
	struct mlx5_ib_pfault *pfault = container_of(work,
						     struct mlx5_ib_pfault,
						     work);
	enum mlx5_ib_pagefault_context context =
		mlx5_ib_get_pagefault_context(&pfault->mpfault);
	struct mlx5_ib_qp *qp = container_of(pfault, struct mlx5_ib_qp,
					     pagefaults[context]);
	mlx5_ib_mr_pfault_handler(qp, pfault);
|
||||
}
|
||||
|
||||
void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&qp->disable_page_faults_lock, flags);
|
||||
qp->disable_page_faults = 1;
|
||||
spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags);
|
||||
|
||||
/*
|
||||
* Note that at this point, we are guarenteed that no more
|
||||
* work queue elements will be posted to the work queue with
|
||||
* the QP we are closing.
|
||||
*/
|
||||
flush_workqueue(mlx5_ib_page_fault_wq);
|
||||
}
|
||||
|
||||
void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&qp->disable_page_faults_lock, flags);
|
||||
qp->disable_page_faults = 0;
|
||||
spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags);
|
||||
}
|
||||
|
||||
static void mlx5_ib_pfault_handler(struct mlx5_core_qp *qp,
|
||||
struct mlx5_pagefault *pfault)
|
||||
{
|
||||
/*
|
||||
* Note that we will only get one fault event per QP per context
|
||||
* (responder/initiator, read/write), until we resolve the page fault
|
||||
* with the mlx5_ib_page_fault_resume command. Since this function is
|
||||
* called from within the work element, there is no risk of missing
|
||||
* events.
|
||||
*/
|
||||
struct mlx5_ib_qp *mibqp = to_mibqp(qp);
|
||||
enum mlx5_ib_pagefault_context context =
|
||||
mlx5_ib_get_pagefault_context(pfault);
|
||||
struct mlx5_ib_pfault *qp_pfault = &mibqp->pagefaults[context];
|
||||
|
||||
qp_pfault->mpfault = *pfault;
|
||||
|
||||
/* No need to stop interrupts here since we are in an interrupt */
|
||||
spin_lock(&mibqp->disable_page_faults_lock);
|
||||
if (!mibqp->disable_page_faults)
|
||||
queue_work(mlx5_ib_page_fault_wq, &qp_pfault->work);
|
||||
spin_unlock(&mibqp->disable_page_faults_lock);
|
||||
}
|
||||
|
||||
void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp)
|
||||
{
|
||||
int i;
|
||||
|
||||
qp->disable_page_faults = 1;
|
||||
spin_lock_init(&qp->disable_page_faults_lock);
|
||||
|
||||
qp->trans_qp.base.mqp.pfault_handler = mlx5_ib_pfault_handler;
|
||||
|
||||
for (i = 0; i < MLX5_IB_PAGEFAULT_CONTEXTS; ++i)
|
||||
INIT_WORK(&qp->pagefaults[i].work, mlx5_ib_qp_pfault_action);
|
||||
}
|
||||
|
||||
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev)
|
||||
{
|
||||
int ret;
|
||||
|
@ -780,17 +744,3 @@ void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev)
|
|||
cleanup_srcu_struct(&ibdev->mr_srcu);
|
||||
}
|
||||
|
||||
int __init mlx5_ib_odp_init(void)
|
||||
{
|
||||
mlx5_ib_page_fault_wq = alloc_ordered_workqueue("mlx5_ib_page_faults",
|
||||
WQ_MEM_RECLAIM);
|
||||
if (!mlx5_ib_page_fault_wq)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mlx5_ib_odp_cleanup(void)
|
||||
{
|
||||
destroy_workqueue(mlx5_ib_page_fault_wq);
|
||||
}
|
||||
|
|
|
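The pages_in_range() arithmetic referenced in the ODP hunks above rounds the end of the faulting byte range up to a page boundary, rounds the start down, and shifts the difference by PAGE_SHIFT to get a whole-page count. A minimal standalone sketch of that arithmetic, assuming a 4 KiB page size for illustration (the kernel's PAGE_SIZE/PAGE_MASK/PAGE_SHIFT are architecture-defined):

#include <stdio.h>

#define PAGE_SHIFT 12                    /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static unsigned long pages_in_range(unsigned long address, unsigned long length)
{
	/* round the end up and the start down, then count whole pages */
	return (((address + length + PAGE_SIZE - 1) & PAGE_MASK) -
		(address & PAGE_MASK)) >> PAGE_SHIFT;
}

int main(void)
{
	/* 1 byte at the end of a page touches one page; 2 bytes there
	 * straddle into the next page and touch two. */
	printf("%lu\n", pages_in_range(0x1fff, 1)); /* 1 */
	printf("%lu\n", pages_in_range(0x1fff, 2)); /* 2 */
	return 0;
}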
@@ -475,60 +475,53 @@ static int qp_has_rq(struct ib_qp_init_attr *attr)
return 1;
}

static int first_med_uuar(void)
static int first_med_bfreg(void)
{
return 1;
}

static int next_uuar(int n)
enum {
/* this is the first blue flame register in the array of bfregs assigned
* to a processes. Since we do not use it for blue flame but rather
* regular 64 bit doorbells, we do not need a lock for maintaiing
* "odd/even" order
*/
NUM_NON_BLUE_FLAME_BFREGS = 1,
};

static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi)
{
n++;

while (((n % 4) & 2))
n++;

return n;
return get_num_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
}

static int num_med_uuar(struct mlx5_uuar_info *uuari)
static int num_med_bfreg(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi)
{
int n;

n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
uuari->num_low_latency_uuars - 1;
n = max_bfregs(dev, bfregi) - bfregi->num_low_latency_bfregs -
NUM_NON_BLUE_FLAME_BFREGS;

return n >= 0 ? n : 0;
}

static int max_uuari(struct mlx5_uuar_info *uuari)
{
return uuari->num_uars * 4;
}

static int first_hi_uuar(struct mlx5_uuar_info *uuari)
static int first_hi_bfreg(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi)
{
int med;
int i;
int t;

med = num_med_uuar(uuari);
for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
t++;
if (t == med)
return next_uuar(i);
}

return 0;
med = num_med_bfreg(dev, bfregi);
return ++med;
}

static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi)
{
int i;

for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
if (!test_bit(i, uuari->bitmap)) {
set_bit(i, uuari->bitmap);
uuari->count[i]++;
for (i = first_hi_bfreg(dev, bfregi); i < max_bfregs(dev, bfregi); i++) {
if (!bfregi->count[i]) {
bfregi->count[i]++;
return i;
}
}

@@ -536,87 +529,61 @@ static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
return -ENOMEM;
}

static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi)
{
int minidx = first_med_uuar();
int minidx = first_med_bfreg();
int i;

for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
if (uuari->count[i] < uuari->count[minidx])
for (i = first_med_bfreg(); i < first_hi_bfreg(dev, bfregi); i++) {
if (bfregi->count[i] < bfregi->count[minidx])
minidx = i;
if (!bfregi->count[minidx])
break;
}

uuari->count[minidx]++;
bfregi->count[minidx]++;
return minidx;
}

static int alloc_uuar(struct mlx5_uuar_info *uuari,
enum mlx5_ib_latency_class lat)
static int alloc_bfreg(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi,
enum mlx5_ib_latency_class lat)
{
int uuarn = -EINVAL;
int bfregn = -EINVAL;

mutex_lock(&uuari->lock);
mutex_lock(&bfregi->lock);
switch (lat) {
case MLX5_IB_LATENCY_CLASS_LOW:
uuarn = 0;
uuari->count[uuarn]++;
BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1);
bfregn = 0;
bfregi->count[bfregn]++;
break;

case MLX5_IB_LATENCY_CLASS_MEDIUM:
if (uuari->ver < 2)
uuarn = -ENOMEM;
if (bfregi->ver < 2)
bfregn = -ENOMEM;
else
uuarn = alloc_med_class_uuar(uuari);
bfregn = alloc_med_class_bfreg(dev, bfregi);
break;

case MLX5_IB_LATENCY_CLASS_HIGH:
if (uuari->ver < 2)
uuarn = -ENOMEM;
if (bfregi->ver < 2)
bfregn = -ENOMEM;
else
uuarn = alloc_high_class_uuar(uuari);
break;

case MLX5_IB_LATENCY_CLASS_FAST_PATH:
uuarn = 2;
bfregn = alloc_high_class_bfreg(dev, bfregi);
break;
}
mutex_unlock(&uuari->lock);
mutex_unlock(&bfregi->lock);

return uuarn;
return bfregn;
}

static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
static void free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
{
clear_bit(uuarn, uuari->bitmap);
--uuari->count[uuarn];
}

static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
clear_bit(uuarn, uuari->bitmap);
--uuari->count[uuarn];
}

static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
int high_uuar = nuuars - uuari->num_low_latency_uuars;

mutex_lock(&uuari->lock);
if (uuarn == 0) {
--uuari->count[uuarn];
goto out;
}

if (uuarn < high_uuar) {
free_med_class_uuar(uuari, uuarn);
goto out;
}

free_high_class_uuar(uuari, uuarn);

out:
mutex_unlock(&uuari->lock);
mutex_lock(&bfregi->lock);
bfregi->count[bfregn]--;
mutex_unlock(&bfregi->lock);
}

static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)

@@ -657,9 +624,20 @@ static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
struct mlx5_ib_cq *recv_cq);

static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
static int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi, int bfregn)
{
return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
int bfregs_per_sys_page;
int index_of_sys_page;
int offset;

bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
MLX5_NON_FP_BFREGS_PER_UAR;
index_of_sys_page = bfregn / bfregs_per_sys_page;

offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;

return bfregi->sys_pages[index_of_sys_page] + offset;
}

static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,

@@ -762,6 +740,13 @@ err_umem:
return err;
}

static int adjust_bfregn(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi, int bfregn)
{
return bfregn / MLX5_NON_FP_BFREGS_PER_UAR * MLX5_BFREGS_PER_UAR +
bfregn % MLX5_NON_FP_BFREGS_PER_UAR;
}

static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_qp *qp, struct ib_udata *udata,
struct ib_qp_init_attr *attr,

@@ -776,7 +761,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
int uar_index;
int npages;
u32 offset = 0;
int uuarn;
int bfregn;
int ncont = 0;
__be64 *pas;
void *qpc;

@@ -794,27 +779,27 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
*/
if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
/* In CROSS_CHANNEL CQ and QP must use the same UAR */
uuarn = MLX5_CROSS_CHANNEL_UUAR;
bfregn = MLX5_CROSS_CHANNEL_BFREG;
else {
uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
if (uuarn < 0) {
mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_HIGH);
if (bfregn < 0) {
mlx5_ib_dbg(dev, "failed to allocate low latency BFREG\n");
mlx5_ib_dbg(dev, "reverting to medium latency\n");
uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
if (uuarn < 0) {
mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_MEDIUM);
if (bfregn < 0) {
mlx5_ib_dbg(dev, "failed to allocate medium latency BFREG\n");
mlx5_ib_dbg(dev, "reverting to high latency\n");
uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
if (uuarn < 0) {
mlx5_ib_warn(dev, "uuar allocation failed\n");
return uuarn;
bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_LOW);
if (bfregn < 0) {
mlx5_ib_warn(dev, "bfreg allocation failed\n");
return bfregn;
}
}
}
}

uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);
uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn);
mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);

qp->rq.offset = 0;
qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);

@@ -822,7 +807,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,

err = set_user_buf_size(dev, qp, &ucmd, base, attr);
if (err)
goto err_uuar;
goto err_bfreg;

if (ucmd.buf_addr && ubuffer->buf_size) {
ubuffer->buf_addr = ucmd.buf_addr;

@@ -831,7 +816,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
&ubuffer->umem, &npages, &page_shift,
&ncont, &offset);
if (err)
goto err_uuar;
goto err_bfreg;
} else {
ubuffer->umem = NULL;
}

@@ -854,8 +839,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
MLX5_SET(qpc, qpc, page_offset, offset);

MLX5_SET(qpc, qpc, uar_page, uar_index);
resp->uuar_index = uuarn;
qp->uuarn = uuarn;
resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
qp->bfregn = bfregn;

err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
if (err) {

@@ -882,13 +867,13 @@ err_umem:
if (ubuffer->umem)
ib_umem_release(ubuffer->umem);

err_uuar:
free_uuar(&context->uuari, uuarn);
err_bfreg:
free_bfreg(dev, &context->bfregi, bfregn);
return err;
}

static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp,
struct mlx5_ib_qp_base *base)
static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base)
{
struct mlx5_ib_ucontext *context;

@@ -896,7 +881,7 @@ static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp,
mlx5_ib_db_unmap_user(context, &qp->db);
if (base->ubuffer.umem)
ib_umem_release(base->ubuffer.umem);
free_uuar(&context->uuari, qp->uuarn);
free_bfreg(dev, &context->bfregi, qp->bfregn);
}

static int create_kernel_qp(struct mlx5_ib_dev *dev,

@@ -905,14 +890,10 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
u32 **in, int *inlen,
struct mlx5_ib_qp_base *base)
{
enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
struct mlx5_uuar_info *uuari;
int uar_index;
void *qpc;
int uuarn;
int err;

uuari = &dev->mdev->priv.uuari;
if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN |
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
IB_QP_CREATE_IPOIB_UD_LSO |

@@ -920,21 +901,17 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
return -EINVAL;

if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
qp->bf.bfreg = &dev->fp_bfreg;
else
qp->bf.bfreg = &dev->bfreg;

uuarn = alloc_uuar(uuari, lc);
if (uuarn < 0) {
mlx5_ib_dbg(dev, "\n");
return -ENOMEM;
}

qp->bf = &uuari->bfs[uuarn];
uar_index = qp->bf->uar->index;
qp->bf.buf_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
uar_index = qp->bf.bfreg->index;

err = calc_sq_size(dev, init_attr, qp);
if (err < 0) {
mlx5_ib_dbg(dev, "err %d\n", err);
goto err_uuar;
return err;
}

qp->rq.offset = 0;

@@ -944,7 +921,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
err = mlx5_buf_alloc(dev->mdev, base->ubuffer.buf_size, &qp->buf);
if (err) {
mlx5_ib_dbg(dev, "err %d\n", err);
goto err_uuar;
return err;
}

qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);

@@ -994,34 +971,30 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
return 0;

err_wrid:
mlx5_db_free(dev->mdev, &qp->db);
kfree(qp->sq.wqe_head);
kfree(qp->sq.w_list);
kfree(qp->sq.wrid);
kfree(qp->sq.wr_data);
kfree(qp->rq.wrid);
mlx5_db_free(dev->mdev, &qp->db);

err_free:
kvfree(*in);

err_buf:
mlx5_buf_free(dev->mdev, &qp->buf);

err_uuar:
free_uuar(&dev->mdev->priv.uuari, uuarn);
return err;
}

static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
mlx5_db_free(dev->mdev, &qp->db);
kfree(qp->sq.wqe_head);
kfree(qp->sq.w_list);
kfree(qp->sq.wrid);
kfree(qp->sq.wr_data);
kfree(qp->rq.wrid);
mlx5_db_free(dev->mdev, &qp->db);
mlx5_buf_free(dev->mdev, &qp->buf);
free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
}

static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)

@@ -1353,7 +1326,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (init_attr->create_flags || init_attr->send_cq)
return -EINVAL;

min_resp_len = offsetof(typeof(resp), uuar_index) + sizeof(resp.uuar_index);
min_resp_len = offsetof(typeof(resp), bfreg_index) + sizeof(resp.bfreg_index);
if (udata->outlen < min_resp_len)
return -EINVAL;

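The new bfregn_to_uar_index() above maps a flat blue-flame register number onto a UAR index by first working out how many non-fast-path bfregs share one system page, then indexing into the per-page UAR table. A standalone sketch of that index arithmetic, with made-up constants standing in for the driver's capability-derived values:

#include <stdio.h>

#define NON_FP_BFREGS_PER_UAR 2 /* assumed, as in MLX5_NON_FP_BFREGS_PER_UAR */
#define UARS_PER_SYS_PAGE     4 /* assumed: 4K UARs packed per system page */

/* hypothetical first-UAR index of each system page */
static const int sys_pages[] = {0, 8, 16};

static int bfregn_to_uar_index(int bfregn)
{
	int bfregs_per_sys_page = UARS_PER_SYS_PAGE * NON_FP_BFREGS_PER_UAR;
	int index_of_sys_page = bfregn / bfregs_per_sys_page;
	int offset = bfregn % bfregs_per_sys_page / NON_FP_BFREGS_PER_UAR;

	return sys_pages[index_of_sys_page] + offset;
}

int main(void)
{
	/* bfregs 0..1 share UAR 0, 2..3 share UAR 1, and bfreg 8 rolls
	 * over to the first UAR of the second system page. */
	for (int n = 0; n < 10; n++)
		printf("bfregn %d -> uar_index %d\n", n, bfregn_to_uar_index(n));
	return 0;
}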
@@ -1526,9 +1499,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
&qp->raw_packet_qp.rq.base :
&qp->trans_qp.base;

if (init_attr->qp_type != IB_QPT_RAW_PACKET)
mlx5_ib_odp_create_qp(qp);

mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);

@@ -1795,7 +1765,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,

err_create:
if (qp->create_type == MLX5_QP_USER)
destroy_qp_user(pd, qp, base);
destroy_qp_user(dev, pd, qp, base);
else if (qp->create_type == MLX5_QP_KERNEL)
destroy_qp_kernel(dev, qp);

@@ -1923,7 +1893,6 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)

if (qp->state != IB_QPS_RESET) {
if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET) {
mlx5_ib_qp_disable_pagefaults(qp);
err = mlx5_core_qp_modify(dev->mdev,
MLX5_CMD_OP_2RST_QP, 0,
NULL, &base->mqp);

@@ -1974,7 +1943,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
if (qp->create_type == MLX5_QP_KERNEL)
destroy_qp_kernel(dev, qp);
else if (qp->create_type == MLX5_QP_USER)
destroy_qp_user(&get_pd(qp)->ibpd, qp, base);
destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base);
}

static const char *ib_qp_type_str(enum ib_qp_type type)

@@ -2823,16 +2792,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (mlx5_st < 0)
goto out;

/* If moving to a reset or error state, we must disable page faults on
* this QP and flush all current page faults. Otherwise a stale page
* fault may attempt to work on this QP after it is reset and moved
* again to RTS, and may cause the driver and the device to get out of
* sync. */
if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
(new_state == IB_QPS_RESET || new_state == IB_QPS_ERR) &&
(qp->ibqp.qp_type != IB_QPT_RAW_PACKET))
mlx5_ib_qp_disable_pagefaults(qp);

if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
!optab[mlx5_cur][mlx5_new])
goto out;

@@ -2864,10 +2823,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (err)
goto out;

if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT &&
(qp->ibqp.qp_type != IB_QPT_RAW_PACKET))
mlx5_ib_qp_enable_pagefaults(qp);

qp->state = new_state;

if (attr_mask & IB_QP_ACCESS_FLAGS)

@@ -3029,20 +2984,20 @@ static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,

if (wr->opcode == IB_WR_LSO) {
struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
int size_of_inl_hdr_start = sizeof(eseg->inline_hdr_start);
int size_of_inl_hdr_start = sizeof(eseg->inline_hdr.start);
u64 left, leftlen, copysz;
void *pdata = ud_wr->header;

left = ud_wr->hlen;
eseg->mss = cpu_to_be16(ud_wr->mss);
eseg->inline_hdr_sz = cpu_to_be16(left);
eseg->inline_hdr.sz = cpu_to_be16(left);

/*
* check if there is space till the end of queue, if yes,
* copy all in one shot, otherwise copy till the end of queue,
* rollback and than the copy the left
*/
leftlen = qend - (void *)eseg->inline_hdr_start;
leftlen = qend - (void *)eseg->inline_hdr.start;
copysz = min_t(u64, leftlen, left);

memcpy(seg - size_of_inl_hdr_start, pdata, copysz);

@@ -3080,9 +3035,10 @@ static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
dseg->addr = cpu_to_be64(sg->addr);
}

static __be16 get_klm_octo(int npages)
static u64 get_xlt_octo(u64 bytes)
{
return cpu_to_be16(ALIGN(npages, 8) / 2);
return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
MLX5_IB_UMR_OCTOWORD;
}

static __be64 frwr_mkey_mask(void)

@@ -3127,18 +3083,14 @@ static __be64 sig_mkey_mask(void)
}

static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
struct mlx5_ib_mr *mr)
struct mlx5_ib_mr *mr)
{
int ndescs = mr->ndescs;
int size = mr->ndescs * mr->desc_size;

memset(umr, 0, sizeof(*umr));

if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
/* KLMs take twice the size of MTTs */
ndescs *= 2;

umr->flags = MLX5_UMR_CHECK_NOT_FREE;
umr->klm_octowords = get_klm_octo(ndescs);
umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
umr->mkey_mask = frwr_mkey_mask();
}

@@ -3149,37 +3101,17 @@ static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
umr->flags = MLX5_UMR_INLINE;
}

static __be64 get_umr_reg_mr_mask(int atomic)
static __be64 get_umr_enable_mr_mask(void)
{
u64 result;

result = MLX5_MKEY_MASK_LEN |
MLX5_MKEY_MASK_PAGE_SIZE |
MLX5_MKEY_MASK_START_ADDR |
MLX5_MKEY_MASK_PD |
MLX5_MKEY_MASK_LR |
MLX5_MKEY_MASK_LW |
MLX5_MKEY_MASK_KEY |
MLX5_MKEY_MASK_RR |
MLX5_MKEY_MASK_RW |
result = MLX5_MKEY_MASK_KEY |
MLX5_MKEY_MASK_FREE;

if (atomic)
result |= MLX5_MKEY_MASK_A;

return cpu_to_be64(result);
}

static __be64 get_umr_unreg_mr_mask(void)
{
u64 result;

result = MLX5_MKEY_MASK_FREE;

return cpu_to_be64(result);
}

static __be64 get_umr_update_mtt_mask(void)
static __be64 get_umr_disable_mr_mask(void)
{
u64 result;

@@ -3194,23 +3126,22 @@ static __be64 get_umr_update_translation_mask(void)

result = MLX5_MKEY_MASK_LEN |
MLX5_MKEY_MASK_PAGE_SIZE |
MLX5_MKEY_MASK_START_ADDR |
MLX5_MKEY_MASK_KEY |
MLX5_MKEY_MASK_FREE;
MLX5_MKEY_MASK_START_ADDR;

return cpu_to_be64(result);
}

static __be64 get_umr_update_access_mask(void)
static __be64 get_umr_update_access_mask(int atomic)
{
u64 result;

result = MLX5_MKEY_MASK_LW |
result = MLX5_MKEY_MASK_LR |
MLX5_MKEY_MASK_LW |
MLX5_MKEY_MASK_RR |
MLX5_MKEY_MASK_RW |
MLX5_MKEY_MASK_A |
MLX5_MKEY_MASK_KEY |
MLX5_MKEY_MASK_FREE;
MLX5_MKEY_MASK_RW;

if (atomic)
result |= MLX5_MKEY_MASK_A;

return cpu_to_be64(result);
}

@@ -3219,9 +3150,7 @@ static __be64 get_umr_update_pd_mask(void)
{
u64 result;

result = MLX5_MKEY_MASK_PD |
MLX5_MKEY_MASK_KEY |
MLX5_MKEY_MASK_FREE;
result = MLX5_MKEY_MASK_PD;

return cpu_to_be64(result);
}

@@ -3238,24 +3167,24 @@ static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
else
umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */

if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
umr->klm_octowords = get_klm_octo(umrwr->npages);
if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) {
umr->mkey_mask = get_umr_update_mtt_mask();
umr->bsf_octowords = get_klm_octo(umrwr->target.offset);
umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
}
if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
umr->mkey_mask |= get_umr_update_translation_mask();
if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_ACCESS)
umr->mkey_mask |= get_umr_update_access_mask();
if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD)
umr->mkey_mask |= get_umr_update_pd_mask();
if (!umr->mkey_mask)
umr->mkey_mask = get_umr_reg_mr_mask(atomic);
} else {
umr->mkey_mask = get_umr_unreg_mr_mask();
umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
u64 offset = get_xlt_octo(umrwr->offset);

umr->xlt_offset = cpu_to_be16(offset & 0xffff);
umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16);
umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
}
if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
umr->mkey_mask |= get_umr_update_translation_mask();
if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) {
umr->mkey_mask |= get_umr_update_access_mask(atomic);
umr->mkey_mask |= get_umr_update_pd_mask();
}
if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR)
umr->mkey_mask |= get_umr_enable_mr_mask();
if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
umr->mkey_mask |= get_umr_disable_mr_mask();

if (!wr->num_sge)
umr->flags |= MLX5_UMR_INLINE;

@@ -3303,17 +3232,17 @@ static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *w
struct mlx5_umr_wr *umrwr = umr_wr(wr);

memset(seg, 0, sizeof(*seg));
if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
seg->status = MLX5_MKEY_STATUS_FREE;
return;
}

seg->flags = convert_access(umrwr->access_flags);
if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) {
if (umrwr->pd)
seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
seg->start_addr = cpu_to_be64(umrwr->target.virt_addr);
}
if (umrwr->pd)
seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
!umrwr->length)
seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64);

seg->start_addr = cpu_to_be64(umrwr->virt_addr);
seg->len = cpu_to_be64(umrwr->length);
seg->log2_page_size = umrwr->page_shift;
seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |

@@ -3611,7 +3540,7 @@ static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
}

static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
struct ib_sig_handover_wr *wr, u32 nelements,
struct ib_sig_handover_wr *wr, u32 size,
u32 length, u32 pdn)
{
struct ib_mr *sig_mr = wr->sig_mr;

@@ -3626,17 +3555,17 @@ static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
MLX5_MKEY_BSF_EN | pdn);
seg->len = cpu_to_be64(length);
seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements)));
seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size));
seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
}

static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
u32 nelements)
u32 size)
{
memset(umr, 0, sizeof(*umr));

umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
umr->klm_octowords = get_klm_octo(nelements);
umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
umr->mkey_mask = sig_mkey_mask();
}

@@ -3648,7 +3577,7 @@ static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
u32 pdn = get_pd(qp)->pdn;
u32 klm_oct_size;
u32 xlt_size;
int region_len, ret;

if (unlikely(wr->wr.num_sge != 1) ||

@@ -3670,15 +3599,15 @@ static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
* then we use strided block format (3 octowords),
* else we use single KLM (1 octoword)
**/
klm_oct_size = wr->prot ? 3 : 1;
xlt_size = wr->prot ? 0x30 : sizeof(struct mlx5_klm);

set_sig_umr_segment(*seg, klm_oct_size);
set_sig_umr_segment(*seg, xlt_size);
*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
if (unlikely((*seg == qp->sq.qend)))
*seg = mlx5_get_send_wqe(qp, 0);

set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn);
set_sig_mkey_segment(*seg, wr, xlt_size, region_len, pdn);
*seg += sizeof(struct mlx5_mkey_seg);
*size += sizeof(struct mlx5_mkey_seg) / 16;
if (unlikely((*seg == qp->sq.qend)))

@@ -3784,24 +3713,6 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
}
}

static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
unsigned bytecnt, struct mlx5_ib_qp *qp)
{
while (bytecnt > 0) {
__iowrite64_copy(dst++, src++, 8);
__iowrite64_copy(dst++, src++, 8);
__iowrite64_copy(dst++, src++, 8);
__iowrite64_copy(dst++, src++, 8);
__iowrite64_copy(dst++, src++, 8);
__iowrite64_copy(dst++, src++, 8);
__iowrite64_copy(dst++, src++, 8);
__iowrite64_copy(dst++, src++, 8);
bytecnt -= 64;
if (unlikely(src == qp->sq.qend))
src = mlx5_get_send_wqe(qp, 0);
}
}

static u8 get_fence(u8 fence, struct ib_send_wr *wr)
{
if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&

@@ -3897,7 +3808,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);

qp = to_mqp(ibqp);
bf = qp->bf;
bf = &qp->bf;
qend = qp->sq.qend;

spin_lock_irqsave(&qp->sq.lock, flags);

@@ -4170,28 +4081,13 @@ out:
* we hit doorbell */
wmb();

if (bf->need_lock)
spin_lock(&bf->lock);
else
__acquire(&bf->lock);

/* TBD enable WC */
if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
/* wc_wmb(); */
} else {
mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset,
MLX5_GET_DOORBELL_LOCK(&bf->lock32));
/* Make sure doorbells don't leak out of SQ spinlock
* and reach the HCA out of order.
*/
mmiowb();
}
/* currently we support only regular doorbells */
mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset, NULL);
/* Make sure doorbells don't leak out of SQ spinlock
* and reach the HCA out of order.
*/
mmiowb();
bf->offset ^= bf->buf_size;
if (bf->need_lock)
spin_unlock(&bf->lock);
else
__release(&bf->lock);
}

spin_unlock_irqrestore(&qp->sq.lock, flags);

@@ -4559,14 +4455,6 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
qp_init_attr);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
/*
* Wait for any outstanding page faults, in case the user frees memory
* based upon this query's result.
*/
flush_workqueue(mlx5_ib_page_fault_wq);
#endif

mutex_lock(&qp->mutex);

if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {

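The get_klm_octo() to get_xlt_octo() switch above moves the UMR size bookkeeping from page counts to byte counts expressed in octowords. A sketch of the octoword arithmetic, under the assumption of 16-byte octowords and 64-byte XLT alignment (the values I would expect behind MLX5_IB_UMR_OCTOWORD and MLX5_IB_UMR_XLT_ALIGNMENT, labeled here as assumptions):

#include <stdio.h>

#define UMR_OCTOWORD      16ULL /* assumed: one octoword = 16 bytes */
#define UMR_XLT_ALIGNMENT 64ULL /* assumed: XLT chunks align to 64 bytes */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

static unsigned long long get_xlt_octo(unsigned long long bytes)
{
	/* Round the translation-table size up to the alignment, then
	 * express it in 16-byte octowords, as the WQE field expects. */
	return ALIGN(bytes, UMR_XLT_ALIGNMENT) / UMR_OCTOWORD;
}

int main(void)
{
	printf("%llu\n", get_xlt_octo(1));  /* 4: one 64-byte chunk */
	printf("%llu\n", get_xlt_octo(64)); /* 4 */
	printf("%llu\n", get_xlt_octo(65)); /* 8: spills into a second chunk */
	return 0;
}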
@@ -118,7 +118,7 @@ static struct device *dma_device(struct rxe_dev *rxe)

ndev = rxe->ndev;

if (ndev->priv_flags & IFF_802_1Q_VLAN)
if (is_vlan_dev(ndev))
ndev = vlan_dev_real_dev(ndev);

return ndev->dev.parent;

@@ -147,7 +147,7 @@ static word plci_remove_check(PLCI *);
static void listen_check(DIVA_CAPI_ADAPTER *);
static byte AddInfo(byte **, byte **, byte *, byte *);
static byte getChannel(API_PARSE *);
static void IndParse(PLCI *, word *, byte **, byte);
static void IndParse(PLCI *, const word *, byte **, byte);
static byte ie_compare(byte *, byte *);
static word find_cip(DIVA_CAPI_ADAPTER *, byte *, byte *);
static word CPN_filter_ok(byte *cpn, DIVA_CAPI_ADAPTER *, word);

@@ -4858,7 +4858,7 @@ static void sig_ind(PLCI *plci)
/* included before the ESC_MSGTYPE and MAXPARMSIDS has to be incremented */
/* SMSG is situated at the end because its 0 (for compatibility reasons */
/* (see Info_Mask Bit 4, first IE. then the message type) */
word parms_id[] =
static const word parms_id[] =
{MAXPARMSIDS, CPN, 0xff, DSA, OSA, BC, LLC, HLC, ESC_CAUSE, DSP, DT, CHA,
UUI, CONG_RR, CONG_RNR, ESC_CHI, KEY, CHI, CAU, ESC_LAW,
RDN, RDX, CONN_NR, RIN, NI, CAI, ESC_CR,

@@ -4866,12 +4866,12 @@ static void sig_ind(PLCI *plci)
/* 14 FTY repl by ESC_CHI */
/* 18 PI repl by ESC_LAW */
/* removed OAD changed to 0xff for future use, OAD is multiIE now */
word multi_fac_id[] = {1, FTY};
word multi_pi_id[] = {1, PI};
word multi_CiPN_id[] = {1, OAD};
word multi_ssext_id[] = {1, ESC_SSEXT};
static const word multi_fac_id[] = {1, FTY};
static const word multi_pi_id[] = {1, PI};
static const word multi_CiPN_id[] = {1, OAD};
static const word multi_ssext_id[] = {1, ESC_SSEXT};

word multi_vswitch_id[] = {1, ESC_VSWITCH};
static const word multi_vswitch_id[] = {1, ESC_VSWITCH};

byte *cau;
word ncci;

@@ -8924,7 +8924,7 @@ static void listen_check(DIVA_CAPI_ADAPTER *a)
/* functions for all parameters sent in INDs */
/*------------------------------------------------------------------*/

static void IndParse(PLCI *plci, word *parms_id, byte **parms, byte multiIEsize)
static void IndParse(PLCI *plci, const word *parms_id, byte **parms, byte multiIEsize)
{
word ploc; /* points to current location within packet */
byte w;

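The point of the ISDN hunks above is that once IndParse() takes a const-qualified table pointer, the ID tables themselves can become static const and be shared read-only data instead of being rebuilt on the stack each call. A minimal standalone sketch of the same pattern, with a hypothetical ID table:

#include <stdio.h>

typedef unsigned short word;

/* Once the parser takes 'const word *', this table can live in
 * read-only storage; element 0 holds the count, as in the driver. */
static const word parms_id[] = {3, 0x70, 0x74, 0x7d}; /* hypothetical IDs */

static void ind_parse(const word *ids)
{
	for (word i = 1; i <= ids[0]; i++)
		printf("IE id 0x%02x\n", ids[i]);
}

int main(void)
{
	ind_parse(parms_id);
	return 0;
}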
@@ -135,6 +135,7 @@ config MACVTAP
tristate "MAC-VLAN based tap driver"
depends on MACVLAN
depends on INET
select TAP
help
This adds a specialized tap character device driver that is based
on the MAC-VLAN network interface, called macvtap. A macvtap device

@@ -165,11 +166,25 @@ config IPVLAN
To compile this driver as a module, choose M here: the module
will be called ipvlan.

config IPVTAP
tristate "IP-VLAN based tap driver"
depends on IPVLAN
depends on INET
select TAP
---help---
This adds a specialized tap character device driver that is based
on the IP-VLAN network interface, called ipvtap. An ipvtap device
can be added in the same way as a ipvlan device, using 'type
ipvtap', and then be accessed through the tap user space interface.

To compile this driver as a module, choose M here: the module
will be called ipvtap.

config VXLAN
tristate "Virtual eXtensible Local Area Network (VXLAN)"
depends on INET
select NET_UDP_TUNNEL
select GRO_CELLS
---help---
This allows one to create vxlan virtual interfaces that provide
Layer 2 Networks over Layer 3 Networks. VXLAN is often used

@@ -184,6 +199,7 @@ config GENEVE
tristate "Generic Network Virtualization Encapsulation"
depends on INET && NET_UDP_TUNNEL
select NET_IP_TUNNEL
select GRO_CELLS
---help---
This allows one to create geneve virtual interfaces that provide
Layer 2 Networks over Layer 3 Networks. GENEVE is often used

@@ -216,6 +232,7 @@ config MACSEC
select CRYPTO
select CRYPTO_AES
select CRYPTO_GCM
select GRO_CELLS
---help---
MACsec is an encryption standard for Ethernet.

@@ -284,6 +301,12 @@ config TUN

If you don't know what to use this for, you don't need it.

config TAP
tristate
---help---
This option is selected by any driver implementing tap user space
interface for a virtual interface to re-use core tap functionality.

config TUN_VNET_CROSS_LE
bool "Support for cross-endian vnet headers on little-endian kernels"
default n

@@ -437,6 +460,9 @@ config XEN_NETDEV_BACKEND
config VMXNET3
tristate "VMware VMXNET3 ethernet driver"
depends on PCI && INET
depends on !(PAGE_SIZE_64KB || ARM64_64K_PAGES || \
IA64_PAGE_SIZE_64KB || MICROBLAZE_64K_PAGES || \
PARISC_PAGE_SIZE_64KB || PPC_64K_PAGES)
help
This driver supports VMware's vmxnet3 virtual ethernet NIC.
To compile this driver as a module, choose M here: the

@@ -7,6 +7,7 @@
#
obj-$(CONFIG_BONDING) += bonding/
obj-$(CONFIG_IPVLAN) += ipvlan/
obj-$(CONFIG_IPVTAP) += ipvlan/
obj-$(CONFIG_DUMMY) += dummy.o
obj-$(CONFIG_EQUALIZER) += eql.o
obj-$(CONFIG_IFB) += ifb.o

@@ -21,6 +22,7 @@ obj-$(CONFIG_PHYLIB) += phy/
obj-$(CONFIG_RIONET) += rionet.o
obj-$(CONFIG_NET_TEAM) += team/
obj-$(CONFIG_TUN) += tun.o
obj-$(CONFIG_TAP) += tap.o
obj-$(CONFIG_VETH) += veth.o
obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
obj-$(CONFIG_VXLAN) += vxlan.o

@@ -211,8 +211,8 @@ static int lacp_fast;

static int bond_init(struct net_device *bond_dev);
static void bond_uninit(struct net_device *bond_dev);
static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
struct rtnl_link_stats64 *stats);
static void bond_get_stats(struct net_device *bond_dev,
struct rtnl_link_stats64 *stats);
static void bond_slave_arr_handler(struct work_struct *work);
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
int mod);

@@ -1993,11 +1993,10 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
return ret;
}

static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
static void bond_info_query(struct net_device *bond_dev, struct ifbond *info)
{
struct bonding *bond = netdev_priv(bond_dev);
bond_fill_ifbond(bond, info);
return 0;
}

static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)

@@ -3337,8 +3336,8 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
}
}

static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
struct rtnl_link_stats64 *stats)
static void bond_get_stats(struct net_device *bond_dev,
struct rtnl_link_stats64 *stats)
{
struct bonding *bond = netdev_priv(bond_dev);
struct rtnl_link_stats64 temp;

@@ -3362,8 +3361,6 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,

memcpy(&bond->bond_stats, stats, sizeof(*stats));
spin_unlock(&bond->stats_lock);

return stats;
}

static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)

@@ -3411,12 +3408,11 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond)))
return -EFAULT;

res = bond_info_query(bond_dev, &k_binfo);
if (res == 0 &&
copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
bond_info_query(bond_dev, &k_binfo);
if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
return -EFAULT;

return res;
return 0;
case BOND_SLAVE_INFO_QUERY_OLD:
case SIOCBONDSLAVEINFOQUERY:
u_sinfo = (struct ifslave __user *)ifr->ifr_data;

@@ -4149,8 +4145,6 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_add_slave = bond_enslave,
.ndo_del_slave = bond_release,
.ndo_fix_features = bond_fix_features,
.ndo_neigh_construct = netdev_default_l2upper_neigh_construct,
.ndo_neigh_destroy = netdev_default_l2upper_neigh_destroy,
.ndo_bridge_setlink = switchdev_port_bridge_setlink,
.ndo_bridge_getlink = switchdev_port_bridge_getlink,
.ndo_bridge_dellink = switchdev_port_bridge_dellink,

@@ -6,7 +6,8 @@ obj-$(CONFIG_CAN_VCAN) += vcan.o
obj-$(CONFIG_CAN_SLCAN) += slcan.o

obj-$(CONFIG_CAN_DEV) += can-dev.o
can-dev-y := dev.o
can-dev-y += dev.o
can-dev-y += rx-offload.o

can-dev-$(CONFIG_CAN_LEDS) += led.o

@@ -813,7 +813,7 @@ static int at91_poll(struct napi_struct *napi, int quota)
u32 reg_ier = AT91_IRQ_ERR_FRAME;
reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next);

napi_complete(napi);
napi_complete_done(napi, work_done);
at91_write(priv, AT91_IER, reg_ier);
}

@@ -1070,7 +1070,7 @@ static int c_can_poll(struct napi_struct *napi, int quota)

end:
if (work_done < quota) {
napi_complete(napi);
napi_complete_done(napi, work_done);
/* enable all IRQs if we are not in bus off state */
if (priv->can.state != CAN_STATE_BUS_OFF)
c_can_irq_control(priv, true);

@@ -279,25 +279,45 @@ static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
return 0;
}

/* Checks the validity of predefined bitrate settings */
static int can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt,
const u32 *bitrate_const,
const unsigned int bitrate_const_cnt)
{
struct can_priv *priv = netdev_priv(dev);
unsigned int i;

for (i = 0; i < bitrate_const_cnt; i++) {
if (bt->bitrate == bitrate_const[i])
break;
}

if (i >= priv->bitrate_const_cnt)
return -EINVAL;

return 0;
}

static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc)
const struct can_bittiming_const *btc,
const u32 *bitrate_const,
const unsigned int bitrate_const_cnt)
{
int err;

/* Check if the CAN device has bit-timing parameters */
if (!btc)
return -EOPNOTSUPP;

/*
* Depending on the given can_bittiming parameter structure the CAN
* timing parameters are calculated based on the provided bitrate OR
* alternatively the CAN timing parameters (tq, prop_seg, etc.) are
* provided directly which are then checked and fixed up.
*/
if (!bt->tq && bt->bitrate)
if (!bt->tq && bt->bitrate && btc)
err = can_calc_bittiming(dev, bt, btc);
else if (bt->tq && !bt->bitrate)
else if (bt->tq && !bt->bitrate && btc)
err = can_fixup_bittiming(dev, bt, btc);
else if (!bt->tq && bt->bitrate && bitrate_const)
err = can_validate_bitrate(dev, bt, bitrate_const,
bitrate_const_cnt);
else
err = -EINVAL;

@@ -872,8 +892,20 @@ static int can_changelink(struct net_device *dev,
/* Do not allow changing bittiming while running */
if (dev->flags & IFF_UP)
return -EBUSY;

/* Calculate bittiming parameters based on
* bittiming_const if set, otherwise pass bitrate
* directly via do_set_bitrate(). Bail out if neither
* is given.
*/
if (!priv->bittiming_const && !priv->do_set_bittiming)
return -EOPNOTSUPP;

memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
err = can_get_bittiming(dev, &bt, priv->bittiming_const);
err = can_get_bittiming(dev, &bt,
priv->bittiming_const,
priv->bitrate_const,
priv->bitrate_const_cnt);
if (err)
return err;
memcpy(&priv->bittiming, &bt, sizeof(bt));

@@ -943,9 +975,21 @@ static int can_changelink(struct net_device *dev,
/* Do not allow changing bittiming while running */
if (dev->flags & IFF_UP)
return -EBUSY;

/* Calculate bittiming parameters based on
* data_bittiming_const if set, otherwise pass bitrate
* directly via do_set_bitrate(). Bail out if neither
* is given.
*/
if (!priv->data_bittiming_const && !priv->do_set_data_bittiming)
return -EOPNOTSUPP;

memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
sizeof(dbt));
err = can_get_bittiming(dev, &dbt, priv->data_bittiming_const);
err = can_get_bittiming(dev, &dbt,
priv->data_bittiming_const,
priv->data_bitrate_const,
priv->data_bitrate_const_cnt);
if (err)
return err;
memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));

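The new can_validate_bitrate() path above reduces to a linear scan of a device-provided table of natively supported bitrates. A standalone sketch of that lookup, with a hypothetical table:

#include <stdio.h>

/* Hypothetical table of bitrates a controller supports natively. */
static const unsigned int bitrate_const[] = {125000, 250000, 500000, 1000000};
#define BITRATE_CONST_CNT (sizeof(bitrate_const) / sizeof(bitrate_const[0]))

static int validate_bitrate(unsigned int bitrate)
{
	unsigned int i;

	for (i = 0; i < BITRATE_CONST_CNT; i++) {
		if (bitrate == bitrate_const[i])
			return 0; /* supported */
	}
	return -1;                /* -EINVAL in the kernel */
}

int main(void)
{
	printf("500000: %d\n", validate_bitrate(500000)); /* 0 */
	printf("300000: %d\n", validate_bitrate(300000)); /* -1 */
	return 0;
}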
@@ -958,6 +1002,30 @@ static int can_changelink(struct net_device *dev,
}
}

if (data[IFLA_CAN_TERMINATION]) {
const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
const unsigned int num_term = priv->termination_const_cnt;
unsigned int i;

if (!priv->do_set_termination)
return -EOPNOTSUPP;

/* check whether given value is supported by the interface */
for (i = 0; i < num_term; i++) {
if (termval == priv->termination_const[i])
break;
}
if (i >= num_term)
return -EINVAL;

/* Finally, set the termination value */
err = priv->do_set_termination(dev, termval);
if (err)
return err;

priv->termination = termval;
}

return 0;
}

@@ -980,6 +1048,17 @@ static size_t can_get_size(const struct net_device *dev)
size += nla_total_size(sizeof(struct can_bittiming));
if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
size += nla_total_size(sizeof(struct can_bittiming_const));
if (priv->termination_const) {
size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */
size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */
priv->termination_const_cnt);
}
if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */
size += nla_total_size(sizeof(*priv->bitrate_const) *
priv->bitrate_const_cnt);
if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
size += nla_total_size(sizeof(*priv->data_bitrate_const) *
priv->data_bitrate_const_cnt);

return size;
}

@@ -1018,7 +1097,28 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
(priv->data_bittiming_const &&
nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
sizeof(*priv->data_bittiming_const),
priv->data_bittiming_const)))
priv->data_bittiming_const)) ||

(priv->termination_const &&
(nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
nla_put(skb, IFLA_CAN_TERMINATION_CONST,
sizeof(*priv->termination_const) *
priv->termination_const_cnt,
priv->termination_const))) ||

(priv->bitrate_const &&
nla_put(skb, IFLA_CAN_BITRATE_CONST,
sizeof(*priv->bitrate_const) *
priv->bitrate_const_cnt,
priv->bitrate_const)) ||

(priv->data_bitrate_const &&
nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
sizeof(*priv->data_bitrate_const) *
priv->data_bitrate_const_cnt,
priv->data_bitrate_const))
)

return -EMSGSIZE;

return 0;

@@ -1073,6 +1173,22 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
*/
int register_candev(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);

/* Ensure termination_const, termination_const_cnt and
* do_set_termination consistency. All must be either set or
* unset.
*/
if ((!priv->termination_const != !priv->termination_const_cnt) ||
(!priv->termination_const != !priv->do_set_termination))
return -EINVAL;

if (!priv->bitrate_const != !priv->bitrate_const_cnt)
return -EINVAL;

if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
return -EINVAL;

dev->rtnl_link_ops = &can_link_ops;
return register_netdev(dev);
}

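register_candev() above leans on the `!a != !b` idiom to insist that related fields are all set or all unset: logical negation collapses each operand to 0 or 1, so the inequality acts as a boolean XOR that fires only in the inconsistent case. A small demonstration:

#include <stdio.h>

/* Returns 1 when exactly one of the two is "set" - the inconsistent case. */
static int one_without_the_other(const void *ptr, unsigned int cnt)
{
	return !ptr != !cnt;
}

int main(void)
{
	static const unsigned int table[] = {120, 0};

	printf("%d\n", one_without_the_other(table, 2)); /* 0: both set */
	printf("%d\n", one_without_the_other(NULL, 0));  /* 0: both unset */
	printf("%d\n", one_without_the_other(table, 0)); /* 1: inconsistent */
	return 0;
}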
@@ -3,7 +3,8 @@
 *
 * Copyright (c) 2005-2006 Varma Electronics Oy
 * Copyright (c) 2009 Sascha Hauer, Pengutronix
 * Copyright (c) 2010 Marc Kleine-Budde, Pengutronix
 * Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
 * Copyright (c) 2014 David Jander, Protonic Holland
 *
 * Based on code originally by Andrey Volkov <avolkov@varma-el.com>
 *

@@ -24,6 +25,7 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/led.h>
#include <linux/can/rx-offload.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>

@@ -55,9 +57,10 @@
#define FLEXCAN_MCR_WAK_SRC BIT(19)
#define FLEXCAN_MCR_DOZE BIT(18)
#define FLEXCAN_MCR_SRX_DIS BIT(17)
#define FLEXCAN_MCR_BCC BIT(16)
#define FLEXCAN_MCR_IRMQ BIT(16)
#define FLEXCAN_MCR_LPRIO_EN BIT(13)
#define FLEXCAN_MCR_AEN BIT(12)
/* MCR_MAXMB: maximum used MBs is MAXMB + 1 */
#define FLEXCAN_MCR_MAXMB(x) ((x) & 0x7f)
#define FLEXCAN_MCR_IDAM_A (0x0 << 8)
#define FLEXCAN_MCR_IDAM_B (0x1 << 8)

@@ -143,17 +146,20 @@

/* FLEXCAN interrupt flag register (IFLAG) bits */
/* Errata ERR005829 step7: Reserve first valid MB */
#define FLEXCAN_TX_BUF_RESERVED 8
#define FLEXCAN_TX_BUF_ID 9
#define FLEXCAN_IFLAG_BUF(x) BIT(x)
#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8
#define FLEXCAN_TX_MB_OFF_FIFO 9
#define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0
#define FLEXCAN_TX_MB_OFF_TIMESTAMP 1
#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_OFF_TIMESTAMP + 1)
#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST 63
#define FLEXCAN_IFLAG_MB(x) BIT(x)
#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
#define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6)
#define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5)
#define FLEXCAN_IFLAG_DEFAULT \
	(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | FLEXCAN_IFLAG_RX_FIFO_AVAILABLE | \
	 FLEXCAN_IFLAG_BUF(FLEXCAN_TX_BUF_ID))

/* FLEXCAN message buffers */
#define FLEXCAN_MB_CODE_MASK (0xf << 24)
#define FLEXCAN_MB_CODE_RX_BUSY_BIT (0x1 << 24)
#define FLEXCAN_MB_CODE_RX_INACTIVE (0x0 << 24)
#define FLEXCAN_MB_CODE_RX_EMPTY (0x4 << 24)
#define FLEXCAN_MB_CODE_RX_FULL (0x2 << 24)

@@ -189,7 +195,9 @@
 */
#define FLEXCAN_QUIRK_BROKEN_ERR_STATE BIT(1) /* [TR]WRN_INT not connected */
#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */
#define FLEXCAN_QUIRK_DISABLE_MECR BIT(3) /* Disble Memory error detection */
#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */
#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disble Memory error detection */
#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */

/* Structure of the message buffer */
struct flexcan_mb {

@@ -213,7 +221,10 @@ struct flexcan_regs {
	u32 imask1; /* 0x28 */
	u32 iflag2; /* 0x2c */
	u32 iflag1; /* 0x30 */
	u32 ctrl2; /* 0x34 */
	union { /* 0x34 */
		u32 gfwr_mx28; /* MX28, MX53 */
		u32 ctrl2; /* MX6, VF610 */
	};
	u32 esr2; /* 0x38 */
	u32 imeur; /* 0x3c */
	u32 lrfr; /* 0x40 */

@@ -232,7 +243,11 @@ struct flexcan_regs {
	 * size conf'ed via ctrl2::RFFN
	 * (mx6, vf610)
	 */
	u32 _reserved4[408];
	u32 _reserved4[256]; /* 0x480 */
	u32 rximr[64]; /* 0x880 */
	u32 _reserved5[24]; /* 0x980 */
	u32 gfwr_mx6; /* 0x9e0 - MX6 */
	u32 _reserved6[63]; /* 0x9e4 */
	u32 mecr; /* 0xae0 */
	u32 erriar; /* 0xae4 */
	u32 erridpr; /* 0xae8 */

@@ -249,31 +264,36 @@ struct flexcan_devtype_data {

struct flexcan_priv {
	struct can_priv can;
	struct napi_struct napi;
	struct can_rx_offload offload;

	struct flexcan_regs __iomem *regs;
	u32 reg_esr;
	struct flexcan_mb __iomem *tx_mb;
	struct flexcan_mb __iomem *tx_mb_reserved;
	u8 tx_mb_idx;
	u32 reg_ctrl_default;
	u32 reg_imask1_default;
	u32 reg_imask2_default;

	struct clk *clk_ipg;
	struct clk *clk_per;
	struct flexcan_platform_data *pdata;
	const struct flexcan_devtype_data *devtype_data;
	struct regulator *reg_xceiver;
};

static struct flexcan_devtype_data fsl_p1010_devtype_data = {
static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
	.quirks = FLEXCAN_QUIRK_BROKEN_ERR_STATE,
};

static struct flexcan_devtype_data fsl_imx28_devtype_data;
static const struct flexcan_devtype_data fsl_imx28_devtype_data;

static struct flexcan_devtype_data fsl_imx6q_devtype_data = {
	.quirks = FLEXCAN_QUIRK_DISABLE_RXFG,
static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
	.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
		FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
};

static struct flexcan_devtype_data fsl_vf610_devtype_data = {
	.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_DISABLE_MECR,
static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
	.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
		FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
};

static const struct can_bittiming_const flexcan_bittiming_const = {

@@ -331,13 +351,6 @@ static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv)
	return regulator_disable(priv->reg_xceiver);
}

static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv,
					      u32 reg_esr)
{
	return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
		(reg_esr & FLEXCAN_ESR_ERR_BUS);
}

static int flexcan_chip_enable(struct flexcan_priv *priv)
{
	struct flexcan_regs __iomem *regs = priv->regs;

@@ -468,7 +481,6 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	const struct flexcan_priv *priv = netdev_priv(dev);
	struct flexcan_regs __iomem *regs = priv->regs;
	struct can_frame *cf = (struct can_frame *)skb->data;
	u32 can_id;
	u32 data;

@@ -491,68 +503,73 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)

	if (cf->can_dlc > 0) {
		data = be32_to_cpup((__be32 *)&cf->data[0]);
		flexcan_write(data, &regs->mb[FLEXCAN_TX_BUF_ID].data[0]);
		flexcan_write(data, &priv->tx_mb->data[0]);
	}
	if (cf->can_dlc > 3) {
		data = be32_to_cpup((__be32 *)&cf->data[4]);
		flexcan_write(data, &regs->mb[FLEXCAN_TX_BUF_ID].data[1]);
		flexcan_write(data, &priv->tx_mb->data[1]);
	}

	can_put_echo_skb(skb, dev, 0);

	flexcan_write(can_id, &regs->mb[FLEXCAN_TX_BUF_ID].can_id);
	flexcan_write(ctrl, &regs->mb[FLEXCAN_TX_BUF_ID].can_ctrl);
	flexcan_write(can_id, &priv->tx_mb->can_id);
	flexcan_write(ctrl, &priv->tx_mb->can_ctrl);

	/* Errata ERR005829 step8:
	 * Write twice INACTIVE(0x8) code to first MB.
	 */
	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
		      &regs->mb[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
		      &priv->tx_mb_reserved->can_ctrl);
	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
		      &regs->mb[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
		      &priv->tx_mb_reserved->can_ctrl);

	return NETDEV_TX_OK;
}

static void do_bus_err(struct net_device *dev,
		       struct can_frame *cf, u32 reg_esr)
static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
{
	struct flexcan_priv *priv = netdev_priv(dev);
	int rx_errors = 0, tx_errors = 0;
	struct sk_buff *skb;
	struct can_frame *cf;
	bool rx_errors = false, tx_errors = false;

	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return;

	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	if (reg_esr & FLEXCAN_ESR_BIT1_ERR) {
		netdev_dbg(dev, "BIT1_ERR irq\n");
		cf->data[2] |= CAN_ERR_PROT_BIT1;
		tx_errors = 1;
		tx_errors = true;
	}
	if (reg_esr & FLEXCAN_ESR_BIT0_ERR) {
		netdev_dbg(dev, "BIT0_ERR irq\n");
		cf->data[2] |= CAN_ERR_PROT_BIT0;
		tx_errors = 1;
		tx_errors = true;
	}
	if (reg_esr & FLEXCAN_ESR_ACK_ERR) {
		netdev_dbg(dev, "ACK_ERR irq\n");
		cf->can_id |= CAN_ERR_ACK;
		cf->data[3] = CAN_ERR_PROT_LOC_ACK;
		tx_errors = 1;
		tx_errors = true;
	}
	if (reg_esr & FLEXCAN_ESR_CRC_ERR) {
		netdev_dbg(dev, "CRC_ERR irq\n");
		cf->data[2] |= CAN_ERR_PROT_BIT;
		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
		rx_errors = 1;
		rx_errors = true;
	}
	if (reg_esr & FLEXCAN_ESR_FRM_ERR) {
		netdev_dbg(dev, "FRM_ERR irq\n");
		cf->data[2] |= CAN_ERR_PROT_FORM;
		rx_errors = 1;
		rx_errors = true;
	}
	if (reg_esr & FLEXCAN_ESR_STF_ERR) {
		netdev_dbg(dev, "STF_ERR irq\n");
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		rx_errors = 1;
		rx_errors = true;
	}

	priv->can.can_stats.bus_error++;

@@ -560,32 +577,16 @@ static void do_bus_err(struct net_device *dev,
		dev->stats.rx_errors++;
	if (tx_errors)
		dev->stats.tx_errors++;

	can_rx_offload_irq_queue_err_skb(&priv->offload, skb);
}

static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr)
{
	struct sk_buff *skb;
	struct can_frame *cf;

	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	do_bus_err(dev, cf, reg_esr);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);

	return 1;
}

static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
{
	struct flexcan_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	struct can_frame *cf;
	enum can_state new_state = 0, rx_state = 0, tx_state = 0;
	enum can_state new_state, rx_state, tx_state;
	int flt;
	struct can_berr_counter bec;

@@ -606,33 +607,63 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)

	/* state hasn't changed */
	if (likely(new_state == priv->can.state))
		return 0;
		return;

	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;
		return;

	can_change_state(dev, cf, tx_state, rx_state);

	if (unlikely(new_state == CAN_STATE_BUS_OFF))
		can_bus_off(dev);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);

	return 1;
	can_rx_offload_irq_queue_err_skb(&priv->offload, skb);
}

static void flexcan_read_fifo(const struct net_device *dev,
			      struct can_frame *cf)
static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
{
	const struct flexcan_priv *priv = netdev_priv(dev);
	struct flexcan_regs __iomem *regs = priv->regs;
	struct flexcan_mb __iomem *mb = &regs->mb[0];
	u32 reg_ctrl, reg_id;
	return container_of(offload, struct flexcan_priv, offload);
}

static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
					 struct can_frame *cf,
					 u32 *timestamp, unsigned int n)
{
	struct flexcan_priv *priv = rx_offload_to_priv(offload);
	struct flexcan_regs __iomem *regs = priv->regs;
	struct flexcan_mb __iomem *mb = &regs->mb[n];
	u32 reg_ctrl, reg_id, reg_iflag1;

	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
		u32 code;

		do {
			reg_ctrl = flexcan_read(&mb->can_ctrl);
		} while (reg_ctrl & FLEXCAN_MB_CODE_RX_BUSY_BIT);

		/* is this MB empty? */
		code = reg_ctrl & FLEXCAN_MB_CODE_MASK;
		if ((code != FLEXCAN_MB_CODE_RX_FULL) &&
		    (code != FLEXCAN_MB_CODE_RX_OVERRUN))
			return 0;

		if (code == FLEXCAN_MB_CODE_RX_OVERRUN) {
			/* This MB was overrun, we lost data */
			offload->dev->stats.rx_over_errors++;
			offload->dev->stats.rx_errors++;
		}
	} else {
		reg_iflag1 = flexcan_read(&regs->iflag1);
		if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE))
			return 0;

		reg_ctrl = flexcan_read(&mb->can_ctrl);
	}

	/* increase timstamp to full 32 bit */
	*timestamp = reg_ctrl << 16;

	reg_ctrl = flexcan_read(&mb->can_ctrl);
	reg_id = flexcan_read(&mb->can_id);
	if (reg_ctrl & FLEXCAN_MB_CNT_IDE)
		cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;

@@ -647,69 +678,31 @@ static void flexcan_read_fifo(const struct net_device *dev,
	*(__be32 *)(cf->data + 4) = cpu_to_be32(flexcan_read(&mb->data[1]));

	/* mark as read */
	flexcan_write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
	flexcan_read(&regs->timer);
}

static int flexcan_read_frame(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	skb = alloc_can_skb(dev, &cf);
	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return 0;
	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
		/* Clear IRQ */
		if (n < 32)
			flexcan_write(BIT(n), &regs->iflag1);
		else
			flexcan_write(BIT(n - 32), &regs->iflag2);
	} else {
		flexcan_write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
		flexcan_read(&regs->timer);
	}

	flexcan_read_fifo(dev, cf);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);

	can_led_event(dev, CAN_LED_EVENT_RX);

	return 1;
}

static int flexcan_poll(struct napi_struct *napi, int quota)

static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
{
	struct net_device *dev = napi->dev;
	const struct flexcan_priv *priv = netdev_priv(dev);
	struct flexcan_regs __iomem *regs = priv->regs;
	u32 reg_iflag1, reg_esr;
	int work_done = 0;
	u32 iflag1, iflag2;

	/* The error bits are cleared on read,
	 * use saved value from irq handler.
	 */
	reg_esr = flexcan_read(&regs->esr) | priv->reg_esr;
	iflag2 = flexcan_read(&regs->iflag2) & priv->reg_imask2_default;
	iflag1 = flexcan_read(&regs->iflag1) & priv->reg_imask1_default &
		~FLEXCAN_IFLAG_MB(priv->tx_mb_idx);

	/* handle state changes */
	work_done += flexcan_poll_state(dev, reg_esr);

	/* handle RX-FIFO */
	reg_iflag1 = flexcan_read(&regs->iflag1);
	while (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE &&
	       work_done < quota) {
		work_done += flexcan_read_frame(dev);
		reg_iflag1 = flexcan_read(&regs->iflag1);
	}

	/* report bus errors */
	if (flexcan_has_and_handle_berr(priv, reg_esr) && work_done < quota)
		work_done += flexcan_poll_bus_err(dev, reg_esr);

	if (work_done < quota) {
		napi_complete(napi);
		/* enable IRQs */
		flexcan_write(FLEXCAN_IFLAG_DEFAULT, &regs->imask1);
		flexcan_write(priv->reg_ctrl_default, &regs->ctrl);
	}

	return work_done;
	return (u64)iflag2 << 32 | iflag1;
}

static irqreturn_t flexcan_irq(int irq, void *dev_id)

@@ -718,55 +711,70 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
	struct net_device_stats *stats = &dev->stats;
	struct flexcan_priv *priv = netdev_priv(dev);
	struct flexcan_regs __iomem *regs = priv->regs;
	irqreturn_t handled = IRQ_NONE;
	u32 reg_iflag1, reg_esr;

	reg_iflag1 = flexcan_read(&regs->iflag1);
	reg_esr = flexcan_read(&regs->esr);

	/* ACK all bus error and state change IRQ sources */
	if (reg_esr & FLEXCAN_ESR_ALL_INT)
		flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
	/* reception interrupt */
	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
		u64 reg_iflag;
		int ret;

	/* schedule NAPI in case of:
	 * - rx IRQ
	 * - state change IRQ
	 * - bus error IRQ and bus error reporting is activated
	 */
	if ((reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) ||
	    (reg_esr & FLEXCAN_ESR_ERR_STATE) ||
	    flexcan_has_and_handle_berr(priv, reg_esr)) {
		/* The error bits are cleared on read,
		 * save them for later use.
		 */
		priv->reg_esr = reg_esr & FLEXCAN_ESR_ERR_BUS;
		flexcan_write(FLEXCAN_IFLAG_DEFAULT &
			      ~FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->imask1);
		flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
			      &regs->ctrl);
		napi_schedule(&priv->napi);
	}
		while ((reg_iflag = flexcan_read_reg_iflag_rx(priv))) {
			handled = IRQ_HANDLED;
			ret = can_rx_offload_irq_offload_timestamp(&priv->offload,
								   reg_iflag);
			if (!ret)
				break;
		}
	} else {
		if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) {
			handled = IRQ_HANDLED;
			can_rx_offload_irq_offload_fifo(&priv->offload);
		}

	/* FIFO overflow */
	if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) {
		flexcan_write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1);
		dev->stats.rx_over_errors++;
		dev->stats.rx_errors++;
		/* FIFO overflow interrupt */
		if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) {
			handled = IRQ_HANDLED;
			flexcan_write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1);
			dev->stats.rx_over_errors++;
			dev->stats.rx_errors++;
		}
	}

	/* transmission complete interrupt */
	if (reg_iflag1 & (1 << FLEXCAN_TX_BUF_ID)) {
	if (reg_iflag1 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) {
		handled = IRQ_HANDLED;
		stats->tx_bytes += can_get_echo_skb(dev, 0);
		stats->tx_packets++;
		can_led_event(dev, CAN_LED_EVENT_TX);

		/* after sending a RTR frame MB is in RX mode */
		flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
			      &regs->mb[FLEXCAN_TX_BUF_ID].can_ctrl);
		flexcan_write((1 << FLEXCAN_TX_BUF_ID), &regs->iflag1);
			      &priv->tx_mb->can_ctrl);
		flexcan_write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1);
		netif_wake_queue(dev);
	}

	return IRQ_HANDLED;
	reg_esr = flexcan_read(&regs->esr);

	/* ACK all bus error and state change IRQ sources */
	if (reg_esr & FLEXCAN_ESR_ALL_INT) {
		handled = IRQ_HANDLED;
		flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
	}

	/* state change interrupt */
	if (reg_esr & FLEXCAN_ESR_ERR_STATE)
		flexcan_irq_state(dev, reg_esr);

	/* bus error IRQ - handle if bus error reporting is activated */
	if ((reg_esr & FLEXCAN_ESR_ERR_BUS) &&
	    (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
		flexcan_irq_bus_err(dev, reg_esr);

	return handled;
}

static void flexcan_set_bittiming(struct net_device *dev)

@@ -839,14 +847,23 @@ static int flexcan_chip_start(struct net_device *dev)
	 * only supervisor access
	 * enable warning int
	 * disable local echo
	 * enable individual RX masking
	 * choose format C
	 * set max mailbox number
	 */
	reg_mcr = flexcan_read(&regs->mcr);
	reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
	reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT |
		FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS |
		FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(FLEXCAN_TX_BUF_ID);
	reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV |
		FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ |
		FLEXCAN_MCR_IDAM_C;

	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
		reg_mcr &= ~FLEXCAN_MCR_FEN;
		reg_mcr |= FLEXCAN_MCR_MAXMB(priv->offload.mb_last);
	} else {
		reg_mcr |= FLEXCAN_MCR_FEN |
			FLEXCAN_MCR_MAXMB(priv->tx_mb_idx);
	}
	netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
	flexcan_write(reg_mcr, &regs->mcr);

@@ -883,19 +900,31 @@ static int flexcan_chip_start(struct net_device *dev)
	netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
	flexcan_write(reg_ctrl, &regs->ctrl);

	if ((priv->devtype_data->quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) {
		reg_ctrl2 = flexcan_read(&regs->ctrl2);
		reg_ctrl2 |= FLEXCAN_CTRL2_EACEN | FLEXCAN_CTRL2_RRS;
		flexcan_write(reg_ctrl2, &regs->ctrl2);
	}

	/* clear and invalidate all mailboxes first */
	for (i = FLEXCAN_TX_BUF_ID; i < ARRAY_SIZE(regs->mb); i++) {
	for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) {
		flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE,
			      &regs->mb[i].can_ctrl);
	}

	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
		for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++)
			flexcan_write(FLEXCAN_MB_CODE_RX_EMPTY,
				      &regs->mb[i].can_ctrl);
	}

	/* Errata ERR005829: mark first TX mailbox as INACTIVE */
	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
		      &regs->mb[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
		      &priv->tx_mb_reserved->can_ctrl);

	/* mark TX mailbox as INACTIVE */
	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
		      &regs->mb[FLEXCAN_TX_BUF_ID].can_ctrl);
		      &priv->tx_mb->can_ctrl);

	/* acceptance mask/acceptance code (accept everything) */
	flexcan_write(0x0, &regs->rxgmask);

@@ -905,6 +934,10 @@ static int flexcan_chip_start(struct net_device *dev)
	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_RXFG)
		flexcan_write(0x0, &regs->rxfgmask);

	/* clear acceptance filters */
	for (i = 0; i < ARRAY_SIZE(regs->mb); i++)
		flexcan_write(0, &regs->rximr[i]);

	/* On Vybrid, disable memory error detection interrupts
	 * and freeze mode.
	 * This also works around errata e5295 which generates

@@ -942,7 +975,8 @@ static int flexcan_chip_start(struct net_device *dev)
	/* enable interrupts atomically */
	disable_irq(dev->irq);
	flexcan_write(priv->reg_ctrl_default, &regs->ctrl);
	flexcan_write(FLEXCAN_IFLAG_DEFAULT, &regs->imask1);
	flexcan_write(priv->reg_imask1_default, &regs->imask1);
	flexcan_write(priv->reg_imask2_default, &regs->imask2);
	enable_irq(dev->irq);

	/* print chip status */

@@ -972,6 +1006,7 @@ static void flexcan_chip_stop(struct net_device *dev)
	flexcan_chip_disable(priv);

	/* Disable all interrupts */
	flexcan_write(0, &regs->imask2);
	flexcan_write(0, &regs->imask1);
	flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
		      &regs->ctrl);

@@ -1008,7 +1043,7 @@ static int flexcan_open(struct net_device *dev)

	can_led_event(dev, CAN_LED_EVENT_OPEN);

	napi_enable(&priv->napi);
	can_rx_offload_enable(&priv->offload);
	netif_start_queue(dev);

	return 0;

@@ -1030,7 +1065,7 @@ static int flexcan_close(struct net_device *dev)
	struct flexcan_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	can_rx_offload_disable(&priv->offload);
	flexcan_chip_stop(dev);

	free_irq(dev->irq, dev);

@@ -1104,8 +1139,9 @@ static int register_flexcandev(struct net_device *dev)
	flexcan_write(reg, &regs->mcr);

	/* Currently we only support newer versions of this core
	 * featuring a RX FIFO. Older cores found on some Coldfire
	 * derivates are not yet supported.
	 * featuring a RX hardware FIFO (although this driver doesn't
	 * make use of it on some cores). Older cores, found on some
	 * Coldfire derivates are not tested.
	 */
	reg = flexcan_read(&regs->mcr);
	if (!(reg & FLEXCAN_MCR_FEN)) {

@@ -1208,6 +1244,9 @@ static int flexcan_probe(struct platform_device *pdev)
	if (!dev)
		return -ENOMEM;

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->netdev_ops = &flexcan_netdev_ops;
	dev->irq = irq;
	dev->flags |= IFF_ECHO;

@@ -1223,14 +1262,41 @@ static int flexcan_probe(struct platform_device *pdev)
	priv->regs = regs;
	priv->clk_ipg = clk_ipg;
	priv->clk_per = clk_per;
	priv->pdata = dev_get_platdata(&pdev->dev);
	priv->devtype_data = devtype_data;
	priv->reg_xceiver = reg_xceiver;

	netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
		priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_TIMESTAMP;
		priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP];
	} else {
		priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_FIFO;
		priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO];
	}
	priv->tx_mb = &regs->mb[priv->tx_mb_idx];

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	priv->reg_imask1_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
	priv->reg_imask2_default = 0;

	priv->offload.mailbox_read = flexcan_mailbox_read;

	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
		u64 imask;

		priv->offload.mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST;
		priv->offload.mb_last = FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST;

		imask = GENMASK_ULL(priv->offload.mb_last, priv->offload.mb_first);
		priv->reg_imask1_default |= imask;
		priv->reg_imask2_default |= imask >> 32;

		err = can_rx_offload_add_timestamp(dev, &priv->offload);
	} else {
		priv->reg_imask1_default |= FLEXCAN_IFLAG_RX_FIFO_OVERFLOW |
			FLEXCAN_IFLAG_RX_FIFO_AVAILABLE;
		err = can_rx_offload_add_fifo(dev, &priv->offload, FLEXCAN_NAPI_WEIGHT);
	}
	if (err)
		goto failed_offload;

	err = register_flexcandev(dev);
	if (err) {

@@ -1245,6 +1311,7 @@ static int flexcan_probe(struct platform_device *pdev)

	return 0;

 failed_offload:
 failed_register:
	free_candev(dev);
	return err;

@@ -1256,7 +1323,7 @@ static int flexcan_remove(struct platform_device *pdev)
	struct flexcan_priv *priv = netdev_priv(dev);

	unregister_flexcandev(dev);
	netif_napi_del(&priv->napi);
	can_rx_offload_del(&priv->offload);
	free_candev(dev);

	return 0;
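
Two details of the flexcan conversion above are easy to miss. flexcan_read_reg_iflag_rx() folds the two 32-bit interrupt-flag registers into one u64 so that mailboxes 0..63 can be tested against a single pending mask, and flexcan_probe() builds the matching interrupt-enable mask with GENMASK_ULL(), splitting it back into the two 32-bit imask registers. A small self-contained sketch of the same bit arithmetic, with hypothetical register snapshots and a local stand-in for the kernel's GENMASK_ULL():

#include <stdint.h>
#include <stdio.h>

/* Contiguous bit mask covering bits [lo, hi], like the kernel's GENMASK_ULL() */
#define MY_GENMASK_ULL(hi, lo) \
	(((~0ULL) >> (63 - (hi))) & ~((1ULL << (lo)) - 1))

int main(void)
{
	/* hypothetical iflag register snapshots */
	uint32_t iflag1 = 0x0000000c;	/* mailboxes 2 and 3 pending */
	uint32_t iflag2 = 0x80000000;	/* mailbox 63 pending */

	/* fold both registers into one 64-bit pending mask */
	uint64_t pending = (uint64_t)iflag2 << 32 | iflag1;

	/* enable mask for RX mailboxes 2..63, split back into two u32s */
	uint64_t imask = MY_GENMASK_ULL(63, 2);
	uint32_t imask1 = (uint32_t)imask;
	uint32_t imask2 = (uint32_t)(imask >> 32);

	printf("pending=%016llx imask1=%08x imask2=%08x\n",
	       (unsigned long long)(pending & imask), imask1, imask2);
	return 0;
}
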
@@ -578,7 +578,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
	work_done += ifi_canfd_do_rx_poll(ndev, quota - work_done);

	if (work_done < quota) {
		napi_complete(napi);
		napi_complete_done(napi, work_done);
		ifi_canfd_irq_enable(ndev, 1);
	}

@@ -1475,7 +1475,7 @@ static int ican3_napi(struct napi_struct *napi, int budget)
	/* We have processed all packets that the adapter had, but it
	 * was less than our budget, stop polling */
	if (received < budget)
		napi_complete(napi);
		napi_complete_done(napi, received);

	spin_lock_irqsave(&mod->lock, flags);

@@ -730,7 +730,7 @@ static int m_can_poll(struct napi_struct *napi, int quota)
	work_done += m_can_do_rx_poll(dev, (quota - work_done));

	if (work_done < quota) {
		napi_complete(napi);
		napi_complete_done(napi, work_done);
		m_can_enable_all_interrupts(priv);
	}

@@ -695,7 +695,7 @@ static int rcar_can_rx_poll(struct napi_struct *napi, int quota)
	}
	/* All packets processed */
	if (num_pkts < quota) {
		napi_complete(napi);
		napi_complete_done(napi, num_pkts);
		priv->ier |= RCAR_CAN_IER_RXFIE;
		writeb(priv->ier, &priv->regs->ier);
	}

@@ -1512,7 +1512,7 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)

	/* All packets processed */
	if (num_pkts < quota) {
		napi_complete(napi);
		napi_complete_done(napi, num_pkts);
		/* Enable Rx FIFO interrupts */
		rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
				   RCANFD_RFCC_RFIE);
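
The five hunks above are one mechanical conversion: napi_complete(napi) becomes napi_complete_done(napi, work_done), which reports the amount of work the poll actually did so the core can feed its busy-polling heuristics. The shape of a poll function using it, as a generic self-contained skeleton rather than any of the drivers above (example_priv and the two helpers are placeholders):

#include <linux/netdevice.h>

struct example_priv {
	struct napi_struct napi;
	/* device state would live here */
};

static int example_process_rx(struct example_priv *priv, int budget)
{
	return 0;	/* a real driver drains its RX ring here */
}

static void example_enable_irqs(struct example_priv *priv)
{
	/* a real driver re-enables device RX interrupts here */
}

/* Process up to 'budget' packets, report the actual work done via
 * napi_complete_done(), and re-enable interrupts only when the RX
 * queue ran empty; returning 'budget' keeps the poll scheduled.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int work_done;

	work_done = example_process_rx(priv, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		example_enable_irqs(priv);
	}

	return work_done;
}
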
@@ -0,0 +1,289 @@
/*
 * Copyright (c) 2014 David Jander, Protonic Holland
 * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the version 2 of the GNU General Public License
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

struct can_rx_offload_cb {
	u32 timestamp;
};

static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));

	return (struct can_rx_offload_cb *)skb->cb;
}

static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b)
{
	if (offload->inc)
		return a <= b;
	else
		return a >= b;
}

static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
	if (offload->inc)
		return (*val)++;
	else
		return (*val)--;
}

static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
	struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi);
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	int work_done = 0;

	while ((work_done < quota) &&
	       (skb = skb_dequeue(&offload->skb_queue))) {
		struct can_frame *cf = (struct can_frame *)skb->data;

		work_done++;
		stats->rx_packets++;
		stats->rx_bytes += cf->can_dlc;
		netif_receive_skb(skb);
	}

	if (work_done < quota) {
		napi_complete_done(napi, work_done);

		/* Check if there was another interrupt */
		if (!skb_queue_empty(&offload->skb_queue))
			napi_reschedule(&offload->napi);
	}

	can_led_event(offload->dev, CAN_LED_EVENT_RX);

	return work_done;
}

static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
					int (*compare)(struct sk_buff *a, struct sk_buff *b))
{
	struct sk_buff *pos, *insert = (struct sk_buff *)head;

	skb_queue_reverse_walk(head, pos) {
		const struct can_rx_offload_cb *cb_pos, *cb_new;

		cb_pos = can_rx_offload_get_cb(pos);
		cb_new = can_rx_offload_get_cb(new);

		netdev_dbg(new->dev,
			   "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
			   __func__,
			   cb_pos->timestamp, cb_new->timestamp,
			   cb_new->timestamp - cb_pos->timestamp,
			   skb_queue_len(head));

		if (compare(pos, new) < 0)
			continue;
		insert = pos;
		break;
	}

	__skb_queue_after(head, insert, new);
}

static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
{
	const struct can_rx_offload_cb *cb_a, *cb_b;

	cb_a = can_rx_offload_get_cb(a);
	cb_b = can_rx_offload_get_cb(b);

	/* Substract two u32 and return result as int, to keep
	 * difference steady around the u32 overflow.
	 */
	return cb_b->timestamp - cb_a->timestamp;
}

static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
	struct sk_buff *skb = NULL;
	struct can_rx_offload_cb *cb;
	struct can_frame *cf;
	int ret;

	/* If queue is full or skb not available, read to discard mailbox */
	if (likely(skb_queue_len(&offload->skb_queue) <=
		   offload->skb_queue_len_max))
		skb = alloc_can_skb(offload->dev, &cf);

	if (!skb) {
		struct can_frame cf_overflow;
		u32 timestamp;

		ret = offload->mailbox_read(offload, &cf_overflow,
					    &timestamp, n);
		if (ret)
			offload->dev->stats.rx_dropped++;

		return NULL;
	}

	cb = can_rx_offload_get_cb(skb);
	ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
	if (!ret) {
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}

int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending)
{
	struct sk_buff_head skb_queue;
	unsigned int i;

	__skb_queue_head_init(&skb_queue);

	for (i = offload->mb_first;
	     can_rx_offload_le(offload, i, offload->mb_last);
	     can_rx_offload_inc(offload, &i)) {
		struct sk_buff *skb;

		if (!(pending & BIT_ULL(i)))
			continue;

		skb = can_rx_offload_offload_one(offload, i);
		if (!skb)
			break;

		__skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
	}

	if (!skb_queue_empty(&skb_queue)) {
		unsigned long flags;
		u32 queue_len;

		spin_lock_irqsave(&offload->skb_queue.lock, flags);
		skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
		spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

		if ((queue_len = skb_queue_len(&offload->skb_queue)) >
		    (offload->skb_queue_len_max / 8))
			netdev_dbg(offload->dev, "%s: queue_len=%d\n",
				   __func__, queue_len);

		can_rx_offload_schedule(offload);
	}

	return skb_queue_len(&skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);

int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
{
	struct sk_buff *skb;
	int received = 0;

	while ((skb = can_rx_offload_offload_one(offload, 0))) {
		skb_queue_tail(&offload->skb_queue, skb);
		received++;
	}

	if (received)
		can_rx_offload_schedule(offload);

	return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);

int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb)
{
	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max)
		return -ENOMEM;

	skb_queue_tail(&offload->skb_queue, skb);
	can_rx_offload_schedule(offload);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_queue_err_skb);

static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
{
	offload->dev = dev;

	/* Limit queue len to 4x the weight (rounted to next power of two) */
	offload->skb_queue_len_max = 2 << fls(weight);
	offload->skb_queue_len_max *= 4;
	skb_queue_head_init(&offload->skb_queue);

	can_rx_offload_reset(offload);
	netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);

	dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
		__func__, offload->skb_queue_len_max);

	return 0;
}

int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload)
{
	unsigned int weight;

	if (offload->mb_first > BITS_PER_LONG_LONG ||
	    offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
		return -EINVAL;

	if (offload->mb_first < offload->mb_last) {
		offload->inc = true;
		weight = offload->mb_last - offload->mb_first;
	} else {
		offload->inc = false;
		weight = offload->mb_first - offload->mb_last;
	}

	return can_rx_offload_init_queue(dev, offload, weight);;
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);

int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
{
	if (!offload->mailbox_read)
		return -EINVAL;

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);

void can_rx_offload_enable(struct can_rx_offload *offload)
{
	can_rx_offload_reset(offload);
	napi_enable(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_enable);

void can_rx_offload_del(struct can_rx_offload *offload)
{
	netif_napi_del(&offload->napi);
	skb_queue_purge(&offload->skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);

void can_rx_offload_reset(struct can_rx_offload *offload)
{
}
EXPORT_SYMBOL_GPL(can_rx_offload_reset);
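
The new rx-offload helper above is what the flexcan hunks earlier consume. The contract for a driver is small: embed a struct can_rx_offload, fill in the mailbox_read callback, register FIFO or timestamp mode at probe time, and hand pending-mailbox information to the irq_offload entry points from its interrupt handler; sorting by timestamp and NAPI delivery happen in the helper. A condensed sketch of that wiring, modelled on the flexcan changes above (my_priv, my_mailbox_read and the zeroed pending mask are illustrative stand-ins, not a real driver):

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Hypothetical driver state; only the embedded offload member matters */
struct my_priv {
	struct can_priv can;	/* must be first, see alloc_candev() */
	struct can_rx_offload offload;
};

/* Called by the rx-offload core for each pending mailbox. A real driver
 * fills *cf and *timestamp from mailbox 'n' and acknowledges it;
 * returning 0 means "nothing read", non-zero means "one frame read".
 */
static unsigned int my_mailbox_read(struct can_rx_offload *offload,
				    struct can_frame *cf, u32 *timestamp,
				    unsigned int n)
{
	/* read hardware mailbox 'n' here */
	return 0;
}

/* Probe-time wiring: pick the mailbox window and register the offload */
static int my_offload_setup(struct net_device *dev, struct my_priv *priv)
{
	priv->offload.mailbox_read = my_mailbox_read;
	priv->offload.mb_first = 2;	/* first RX mailbox (illustrative) */
	priv->offload.mb_last = 63;	/* last RX mailbox (illustrative) */

	return can_rx_offload_add_timestamp(dev, &priv->offload);
}

/* IRQ handler: hand the 64-bit pending-mailbox mask to the core, which
 * reads the frames, sorts them by timestamp and schedules NAPI.
 */
static irqreturn_t my_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct my_priv *priv = netdev_priv(dev);
	u64 pending = 0;	/* a real driver reads its iflag registers */

	if (pending)
		can_rx_offload_irq_offload_timestamp(&priv->offload, pending);

	return IRQ_HANDLED;
}

Note also can_rx_offload_compare() in the file above: subtracting two u32 timestamps and truncating to int keeps the ordering correct even when the hardware counter wraps, because the signed difference stays small around the overflow point.
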
@@ -310,7 +310,7 @@ pcmcia_bad:
pcmcia_failed:
	pcmcia_disable_device(pcmcia);
	pcmcia->priv = NULL;
	return ret ?: -ENODEV;
	return ret;
}

static const struct pcmcia_device_id softingcs_ids[] = {
@@ -726,7 +726,7 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
	can_led_event(ndev, CAN_LED_EVENT_RX);

	if (work_done < quota) {
		napi_complete(napi);
		napi_complete_done(napi, work_done);
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
@@ -1,5 +1,6 @@
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm_sf2.o
obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o
bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o
obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o

obj-y += b53/
@@ -712,7 +712,7 @@ static unsigned int b53_get_mib_size(struct b53_device *dev)
	return B53_MIBS_SIZE;
}

static void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
{
	struct b53_device *dev = ds->priv;
	const struct b53_mib_desc *mibs = b53_get_mib(dev);

@@ -723,9 +723,9 @@ static void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
		memcpy(data + i * ETH_GSTRING_LEN,
		       mibs[i].name, ETH_GSTRING_LEN);
}
EXPORT_SYMBOL(b53_get_strings);

static void b53_get_ethtool_stats(struct dsa_switch *ds, int port,
				  uint64_t *data)
void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
	struct b53_device *dev = ds->priv;
	const struct b53_mib_desc *mibs = b53_get_mib(dev);

@@ -756,13 +756,15 @@ static void b53_get_ethtool_stats(struct dsa_switch *ds, int port,

	mutex_unlock(&dev->stats_mutex);
}
EXPORT_SYMBOL(b53_get_ethtool_stats);

static int b53_get_sset_count(struct dsa_switch *ds)
int b53_get_sset_count(struct dsa_switch *ds)
{
	struct b53_device *dev = ds->priv;

	return b53_get_mib_size(dev);
}
EXPORT_SYMBOL(b53_get_sset_count);

static int b53_setup(struct dsa_switch *ds)
{

@@ -921,15 +923,15 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
	}
}

static int b53_vlan_filtering(struct dsa_switch *ds, int port,
			      bool vlan_filtering)
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
{
	return 0;
}
EXPORT_SYMBOL(b53_vlan_filtering);

static int b53_vlan_prepare(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_vlan *vlan,
			    struct switchdev_trans *trans)
int b53_vlan_prepare(struct dsa_switch *ds, int port,
		     const struct switchdev_obj_port_vlan *vlan,
		     struct switchdev_trans *trans)
{
	struct b53_device *dev = ds->priv;

@@ -943,10 +945,11 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port,

	return 0;
}
EXPORT_SYMBOL(b53_vlan_prepare);

static void b53_vlan_add(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_port_vlan *vlan,
			 struct switchdev_trans *trans)
void b53_vlan_add(struct dsa_switch *ds, int port,
		  const struct switchdev_obj_port_vlan *vlan,
		  struct switchdev_trans *trans)
{
	struct b53_device *dev = ds->priv;
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;

@@ -977,9 +980,10 @@ static void b53_vlan_add(struct dsa_switch *ds, int port,
		b53_fast_age_vlan(dev, vid);
	}
}
EXPORT_SYMBOL(b53_vlan_add);

static int b53_vlan_del(struct dsa_switch *ds, int port,
			const struct switchdev_obj_port_vlan *vlan)
int b53_vlan_del(struct dsa_switch *ds, int port,
		 const struct switchdev_obj_port_vlan *vlan)
{
	struct b53_device *dev = ds->priv;
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;

@@ -1015,10 +1019,11 @@ static int b53_vlan_del(struct dsa_switch *ds, int port,

	return 0;
}
EXPORT_SYMBOL(b53_vlan_del);

static int b53_vlan_dump(struct dsa_switch *ds, int port,
			 struct switchdev_obj_port_vlan *vlan,
			 int (*cb)(struct switchdev_obj *obj))
int b53_vlan_dump(struct dsa_switch *ds, int port,
		  struct switchdev_obj_port_vlan *vlan,
		  int (*cb)(struct switchdev_obj *obj))
{
	struct b53_device *dev = ds->priv;
	u16 vid, vid_start = 0, pvid;

@@ -1057,6 +1062,7 @@ static int b53_vlan_dump(struct dsa_switch *ds, int port,

	return err;
}
EXPORT_SYMBOL(b53_vlan_dump);

/* Address Resolution Logic routines */
static int b53_arl_op_wait(struct b53_device *dev)

@@ -1137,7 +1143,7 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
	int ret;

	/* Convert the array into a 64-bit MAC */
	mac = b53_mac_to_u64(addr);
	mac = ether_addr_to_u64(addr);

	/* Perform a read for the given MAC and VID */
	b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac);

@@ -1175,9 +1181,9 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
	return b53_arl_rw_op(dev, 0);
}

static int b53_fdb_prepare(struct dsa_switch *ds, int port,
			   const struct switchdev_obj_port_fdb *fdb,
			   struct switchdev_trans *trans)
int b53_fdb_prepare(struct dsa_switch *ds, int port,
		    const struct switchdev_obj_port_fdb *fdb,
		    struct switchdev_trans *trans)
{
	struct b53_device *priv = ds->priv;

@@ -1189,24 +1195,27 @@ static int b53_fdb_prepare(struct dsa_switch *ds, int port,

	return 0;
}
EXPORT_SYMBOL(b53_fdb_prepare);

static void b53_fdb_add(struct dsa_switch *ds, int port,
			const struct switchdev_obj_port_fdb *fdb,
			struct switchdev_trans *trans)
void b53_fdb_add(struct dsa_switch *ds, int port,
		 const struct switchdev_obj_port_fdb *fdb,
		 struct switchdev_trans *trans)
{
	struct b53_device *priv = ds->priv;

	if (b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, true))
		pr_err("%s: failed to add MAC address\n", __func__);
}
EXPORT_SYMBOL(b53_fdb_add);

static int b53_fdb_del(struct dsa_switch *ds, int port,
		       const struct switchdev_obj_port_fdb *fdb)
int b53_fdb_del(struct dsa_switch *ds, int port,
		const struct switchdev_obj_port_fdb *fdb)
{
	struct b53_device *priv = ds->priv;

	return b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, false);
}
EXPORT_SYMBOL(b53_fdb_del);

static int b53_arl_search_wait(struct b53_device *dev)
{

@@ -1258,9 +1267,9 @@ static int b53_fdb_copy(struct net_device *dev, int port,
	return cb(&fdb->obj);
}

static int b53_fdb_dump(struct dsa_switch *ds, int port,
			struct switchdev_obj_port_fdb *fdb,
			int (*cb)(struct switchdev_obj *obj))
int b53_fdb_dump(struct dsa_switch *ds, int port,
		 struct switchdev_obj_port_fdb *fdb,
		 int (*cb)(struct switchdev_obj *obj))
{
	struct b53_device *priv = ds->priv;
	struct net_device *dev = ds->ports[port].netdev;

@@ -1297,9 +1306,9 @@ static int b53_fdb_dump(struct dsa_switch *ds, int port,

	return 0;
}
EXPORT_SYMBOL(b53_fdb_dump);

static int b53_br_join(struct dsa_switch *ds, int port,
		       struct net_device *bridge)
int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
{
	struct b53_device *dev = ds->priv;
	s8 cpu_port = ds->dst->cpu_port;

@@ -1317,11 +1326,10 @@ static int b53_br_join(struct dsa_switch *ds, int port,
		b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
	}

	dev->ports[port].bridge_dev = bridge;
	b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);

	b53_for_each_port(dev, i) {
		if (dev->ports[i].bridge_dev != bridge)
		if (ds->ports[i].bridge_dev != br)
			continue;

		/* Add this local port to the remote port VLAN control

@@ -1343,11 +1351,11 @@ static int b53_br_join(struct dsa_switch *ds, int port,

	return 0;
}
EXPORT_SYMBOL(b53_br_join);

static void b53_br_leave(struct dsa_switch *ds, int port)
void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
{
	struct b53_device *dev = ds->priv;
	struct net_device *bridge = dev->ports[port].bridge_dev;
	struct b53_vlan *vl = &dev->vlans[0];
	s8 cpu_port = ds->dst->cpu_port;
	unsigned int i;

@@ -1357,7 +1365,7 @@ static void b53_br_leave(struct dsa_switch *ds, int port)

	b53_for_each_port(dev, i) {
		/* Don't touch the remaining ports */
		if (dev->ports[i].bridge_dev != bridge)
		if (ds->ports[i].bridge_dev != br)
			continue;

		b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);

@@ -1372,7 +1380,6 @@ static void b53_br_leave(struct dsa_switch *ds, int port)

	b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
	dev->ports[port].vlan_ctl_mask = pvlan;
	dev->ports[port].bridge_dev = NULL;

	if (is5325(dev) || is5365(dev))
		pvid = 1;

@@ -1393,8 +1400,9 @@ static void b53_br_leave(struct dsa_switch *ds, int port)
		b53_set_vlan_entry(dev, pvid, vl);
	}
}
EXPORT_SYMBOL(b53_br_leave);

static void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
{
	struct b53_device *dev = ds->priv;
	u8 hw_state;

@@ -1426,21 +1434,88 @@ static void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
	reg |= hw_state;
	b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
}
EXPORT_SYMBOL(b53_br_set_stp_state);

static void b53_br_fast_age(struct dsa_switch *ds, int port)
void b53_br_fast_age(struct dsa_switch *ds, int port)
{
	struct b53_device *dev = ds->priv;

	if (b53_fast_age_port(dev, port))
		dev_err(ds->dev, "fast ageing failed\n");
}
EXPORT_SYMBOL(b53_br_fast_age);

static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds)
{
	return DSA_TAG_PROTO_NONE;
}

static struct dsa_switch_ops b53_switch_ops = {
int b53_mirror_add(struct dsa_switch *ds, int port,
		   struct dsa_mall_mirror_tc_entry *mirror, bool ingress)
{
	struct b53_device *dev = ds->priv;
	u16 reg, loc;

	if (ingress)
		loc = B53_IG_MIR_CTL;
	else
		loc = B53_EG_MIR_CTL;

	b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
	reg &= ~MIRROR_MASK;
	reg |= BIT(port);
	b53_write16(dev, B53_MGMT_PAGE, loc, reg);

	b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
	reg &= ~CAP_PORT_MASK;
	reg |= mirror->to_local_port;
	reg |= MIRROR_EN;
	b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);

	return 0;
}
EXPORT_SYMBOL(b53_mirror_add);

void b53_mirror_del(struct dsa_switch *ds, int port,
		    struct dsa_mall_mirror_tc_entry *mirror)
{
	struct b53_device *dev = ds->priv;
	bool loc_disable = false, other_loc_disable = false;
	u16 reg, loc;

	if (mirror->ingress)
		loc = B53_IG_MIR_CTL;
	else
		loc = B53_EG_MIR_CTL;

	/* Update the desired ingress/egress register */
	b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
	reg &= ~BIT(port);
	if (!(reg & MIRROR_MASK))
		loc_disable = true;
	b53_write16(dev, B53_MGMT_PAGE, loc, reg);

	/* Now look at the other one to know if we can disable mirroring
	 * entirely
	 */
	if (mirror->ingress)
		b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, &reg);
	else
		b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, &reg);
	if (!(reg & MIRROR_MASK))
		other_loc_disable = true;

	b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
	/* Both no longer have ports, let's disable mirroring */
	if (loc_disable && other_loc_disable) {
		reg &= ~MIRROR_EN;
		reg &= ~mirror->to_local_port;
	}
	b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
}
EXPORT_SYMBOL(b53_mirror_del);

static const struct dsa_switch_ops b53_switch_ops = {
	.get_tag_protocol = b53_get_tag_protocol,
	.setup = b53_setup,
	.get_strings = b53_get_strings,

@@ -1464,6 +1539,8 @@ static struct dsa_switch_ops b53_switch_ops = {
	.port_fdb_dump = b53_fdb_dump,
	.port_fdb_add = b53_fdb_add,
	.port_fdb_del = b53_fdb_del,
	.port_mirror_add = b53_mirror_add,
	.port_mirror_del = b53_mirror_del,
};

struct b53_chip_data {

@@ -1672,6 +1749,18 @@ static const struct b53_chip_data b53_switch_chips[] = {
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM7278_DEVICE_ID,
		.dev_name = "BCM7278",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_entries = 4,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
};

static int b53_switch_init(struct b53_device *dev)

@@ -1765,14 +1854,15 @@ struct b53_device *b53_switch_alloc(struct device *base,
	struct dsa_switch *ds;
	struct b53_device *dev;

	ds = devm_kzalloc(base, sizeof(*ds) + sizeof(*dev), GFP_KERNEL);
	ds = dsa_switch_alloc(base, DSA_MAX_PORTS);
	if (!ds)
		return NULL;

	dev = (struct b53_device *)(ds + 1);
	dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	ds->priv = dev;
	ds->dev = base;
	dev->dev = base;

	dev->ds = ds;

@@ -1869,7 +1959,7 @@ int b53_switch_register(struct b53_device *dev)

	pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev);

	return dsa_register_switch(dev->ds, dev->ds->dev->of_node);
	return dsa_register_switch(dev->ds, dev->ds->dev);
}
EXPORT_SYMBOL(b53_switch_register);
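
The b53_arl_op() hunk above swaps the driver-private b53_mac_to_u64() for the generic ether_addr_to_u64() from <linux/etherdevice.h>; the header-side removal of the private helpers follows further below. Both pack a 6-byte MAC into the low 48 bits of a u64, most significant octet first. A small userspace demonstration of the same packing (the my_* helpers mimic the kernel's, they are not kernel code):

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* Pack a MAC address into the low 48 bits of a u64, like the kernel's
 * ether_addr_to_u64(): addr[0] ends up in the most significant octet.
 */
static uint64_t my_ether_addr_to_u64(const uint8_t *addr)
{
	uint64_t u = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		u = u << 8 | addr[i];
	return u;
}

/* The inverse, like the kernel's u64_to_ether_addr() */
static void my_u64_to_ether_addr(uint64_t u, uint8_t *addr)
{
	int i;

	for (i = ETH_ALEN - 1; i >= 0; i--) {
		addr[i] = u & 0xff;
		u >>= 8;
	}
}

int main(void)
{
	uint8_t mac[ETH_ALEN] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
	uint8_t out[ETH_ALEN];
	uint64_t packed = my_ether_addr_to_u64(mac);

	my_u64_to_ether_addr(packed, out);
	/* both lines print 001018aabbcc: the round trip is lossless */
	printf("%012llx\n", (unsigned long long)packed);
	printf("%02x%02x%02x%02x%02x%02x\n",
	       out[0], out[1], out[2], out[3], out[4], out[5]);
	return 0;
}
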
@@ -375,18 +375,7 @@ static struct mdio_driver b53_mdio_driver = {
		.of_match_table = b53_of_match,
	},
};

static int __init b53_mdio_driver_register(void)
{
	return mdio_driver_register(&b53_mdio_driver);
}
module_init(b53_mdio_driver_register);

static void __exit b53_mdio_driver_unregister(void)
{
	mdio_driver_unregister(&b53_mdio_driver);
}
module_exit(b53_mdio_driver_unregister);
mdio_module_driver(b53_mdio_driver);

MODULE_DESCRIPTION("B53 MDIO access driver");
MODULE_LICENSE("Dual BSD/GPL");
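
The hunk above replaces the open-coded module_init()/module_exit() pair with the mdio_module_driver() helper from <linux/mdio.h>, which generates exactly that boilerplate. In spirit (a paraphrase of the macro, not a verbatim copy of the header):

/* What mdio_module_driver(b53_mdio_driver) expands to, conceptually:
 * a module_init that registers the driver and a module_exit that
 * unregisters it.
 */
#define my_mdio_module_driver(_mdio_driver)			\
static int __init mdio_module_init(void)			\
{								\
	return mdio_driver_register(&_mdio_driver);		\
}								\
module_init(mdio_module_init);					\
static void __exit mdio_module_exit(void)			\
{								\
	mdio_driver_unregister(&_mdio_driver);			\
}								\
module_exit(mdio_module_exit)
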
@@ -22,6 +22,7 @@
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/phy.h>
#include <linux/etherdevice.h>
#include <net/dsa.h>

#include "b53_regs.h"

@@ -61,6 +62,7 @@ enum {
	BCM53019_DEVICE_ID = 0x53019,
	BCM58XX_DEVICE_ID = 0x5800,
	BCM7445_DEVICE_ID = 0x7445,
	BCM7278_DEVICE_ID = 0x7278,
};

#define B53_N_PORTS 9

@@ -68,7 +70,6 @@ enum {

struct b53_port {
	u16 vlan_ctl_mask;
	struct net_device *bridge_dev;
};

struct b53_vlan {

@@ -178,7 +179,8 @@ static inline int is5301x(struct b53_device *dev)
static inline int is58xx(struct b53_device *dev)
{
	return dev->chip_id == BCM58XX_DEVICE_ID ||
		dev->chip_id == BCM7445_DEVICE_ID;
		dev->chip_id == BCM7445_DEVICE_ID ||
		dev->chip_id == BCM7278_DEVICE_ID;
}

#define B53_CPU_PORT_25 5

@@ -325,25 +327,6 @@ struct b53_arl_entry {
	u8 is_static:1;
};

static inline void b53_mac_from_u64(u64 src, u8 *dst)
{
	unsigned int i;

	for (i = 0; i < ETH_ALEN; i++)
		dst[ETH_ALEN - 1 - i] = (src >> (8 * i)) & 0xff;
}

static inline u64 b53_mac_to_u64(const u8 *src)
{
	unsigned int i;
	u64 dst = 0;

	for (i = 0; i < ETH_ALEN; i++)
		dst |= (u64)src[ETH_ALEN - 1 - i] << (8 * i);

	return dst;
}

static inline void b53_arl_to_entry(struct b53_arl_entry *ent,
				    u64 mac_vid, u32 fwd_entry)
{

@@ -352,14 +335,14 @@ static inline void b53_arl_to_entry(struct b53_arl_entry *ent,
	ent->is_valid = !!(fwd_entry & ARLTBL_VALID);
	ent->is_age = !!(fwd_entry & ARLTBL_AGE);
	ent->is_static = !!(fwd_entry & ARLTBL_STATIC);
	b53_mac_from_u64(mac_vid, ent->mac);
	u64_to_ether_addr(mac_vid, ent->mac);
	ent->vid = mac_vid >> ARLTBL_VID_S;
}

static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
				      const struct b53_arl_entry *ent)
{
	*mac_vid = b53_mac_to_u64(ent->mac);
	*mac_vid = ether_addr_to_u64(ent->mac);
	*mac_vid |= (u64)(ent->vid & ARLTBL_VID_MASK) << ARLTBL_VID_S;
	*fwd_entry = ent->port & ARLTBL_DATA_PORT_ID_MASK;
	if (ent->is_valid)

@@ -392,4 +375,41 @@ static inline int b53_switch_get_reset_gpio(struct b53_device *dev)
	return -ENOENT;
}
#endif

/* Exported functions towards other drivers */
void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data);
void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
int b53_get_sset_count(struct dsa_switch *ds);
int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge);
void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge);
void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
void b53_br_fast_age(struct dsa_switch *ds, int port);
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering);
int b53_vlan_prepare(struct dsa_switch *ds, int port,
		     const struct switchdev_obj_port_vlan *vlan,
		     struct switchdev_trans *trans);
void b53_vlan_add(struct dsa_switch *ds, int port,
		  const struct switchdev_obj_port_vlan *vlan,
		  struct switchdev_trans *trans);
int b53_vlan_del(struct dsa_switch *ds, int port,
		 const struct switchdev_obj_port_vlan *vlan);
int b53_vlan_dump(struct dsa_switch *ds, int port,
		  struct switchdev_obj_port_vlan *vlan,
		  int (*cb)(struct switchdev_obj *obj));
int b53_fdb_prepare(struct dsa_switch *ds, int port,
		    const struct switchdev_obj_port_fdb *fdb,
		    struct switchdev_trans *trans);
void b53_fdb_add(struct dsa_switch *ds, int port,
		 const struct switchdev_obj_port_fdb *fdb,
		 struct switchdev_trans *trans);
int b53_fdb_del(struct dsa_switch *ds, int port,
		const struct switchdev_obj_port_fdb *fdb);
int b53_fdb_dump(struct dsa_switch *ds, int port,
		 struct switchdev_obj_port_fdb *fdb,
		 int (*cb)(struct switchdev_obj *obj));
int b53_mirror_add(struct dsa_switch *ds, int port,
		   struct dsa_mall_mirror_tc_entry *mirror, bool ingress);
void b53_mirror_del(struct dsa_switch *ds, int port,
		    struct dsa_mall_mirror_tc_entry *mirror);

#endif
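
The declarations above turn b53_common into a small library: the operations that were made non-static and EXPORT_SYMBOL'ed earlier can now be referenced from another DSA driver's ops table instead of being duplicated. A hedged sketch of what such a consumer looks like (modelled on how a bcm_sf2-style driver would reuse these; the table below is a placeholder, not code from this series):

#include "b53_priv.h"

/* Hypothetical consumer: a dsa_switch_ops table that provides its own
 * setup and port handling but borrows the shared b53 operations.
 */
static const struct dsa_switch_ops my_sf2_like_ops = {
	.get_strings		= b53_get_strings,
	.get_ethtool_stats	= b53_get_ethtool_stats,
	.get_sset_count		= b53_get_sset_count,
	.port_bridge_join	= b53_br_join,
	.port_bridge_leave	= b53_br_leave,
	.port_stp_state_set	= b53_br_set_stp_state,
	.port_fast_age		= b53_br_fast_age,
	.port_vlan_filtering	= b53_vlan_filtering,
	.port_fdb_add		= b53_fdb_add,
	.port_fdb_del		= b53_fdb_del,
	.port_mirror_add	= b53_mirror_add,
	.port_mirror_del	= b53_mirror_del,
};
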
@@ -206,6 +206,38 @@
 #define  BRCM_HDR_P8_EN			BIT(0) /* Enable tagging on port 8 */
 #define  BRCM_HDR_P5_EN			BIT(1) /* Enable tagging on port 5 */
 
+/* Mirror capture control register (16 bit) */
+#define B53_MIR_CAP_CTL			0x10
+#define  CAP_PORT_MASK			0xf
+#define  BLK_NOT_MIR			BIT(14)
+#define  MIRROR_EN			BIT(15)
+
+/* Ingress mirror control register (16 bit) */
+#define B53_IG_MIR_CTL			0x12
+#define  MIRROR_MASK			0x1ff
+#define  DIV_EN				BIT(13)
+#define  MIRROR_FILTER_MASK		0x3
+#define  MIRROR_FILTER_SHIFT		14
+#define  MIRROR_ALL			0
+#define  MIRROR_DA			1
+#define  MIRROR_SA			2
+
+/* Ingress mirror divider register (16 bit) */
+#define B53_IG_MIR_DIV			0x14
+#define  IN_MIRROR_DIV_MASK		0x3ff
+
+/* Ingress mirror MAC address register (48 bit) */
+#define B53_IG_MIR_MAC			0x16
+
+/* Egress mirror control register (16 bit) */
+#define B53_EG_MIR_CTL			0x1C
+
+/* Egress mirror divider register (16 bit) */
+#define B53_EG_MIR_DIV			0x1E
+
+/* Egress mirror MAC address register (48 bit) */
+#define B53_EG_MIR_MAC			0x20
+
 /* Device ID register (8 or 32 bit) */
 #define B53_DEVICE_ID			0x30
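As a quick illustration of how these mirror fields combine, here is a hedged standalone sketch that assembles capture-control and ingress-control values the way a b53_mirror_add()-style helper plausibly would; the port numbers are arbitrary examples, not driver values:

/* Hypothetical, standalone sketch: build B53_MIR_CAP_CTL and
 * B53_IG_MIR_CTL register values from the bitfields defined above.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1U << (n))
#define CAP_PORT_MASK		0xf
#define MIRROR_EN		BIT(15)
#define MIRROR_MASK		0x1ff
#define MIRROR_DA		1
#define MIRROR_FILTER_SHIFT	14

int main(void)
{
	unsigned int capture_port = 7;	/* port receiving mirrored traffic */
	unsigned int source_port = 2;	/* port being mirrored */
	uint16_t cap_ctl, ig_ctl;

	/* Enable mirroring and select the capture port */
	cap_ctl = MIRROR_EN | (capture_port & CAP_PORT_MASK);

	/* Mirror ingress traffic of source_port, filtered on dest MAC */
	ig_ctl = BIT(source_port) & MIRROR_MASK;
	ig_ctl |= MIRROR_DA << MIRROR_FILTER_SHIFT;

	printf("B53_MIR_CAP_CTL = 0x%04x\n", cap_ctl);
	printf("B53_IG_MIR_CTL  = 0x%04x\n", ig_ctl);
	return 0;
}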
@@ -61,30 +61,10 @@ static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
 	}
 }
 
-static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
+static void bcm_sf2_brcm_hdr_setup(struct bcm_sf2_priv *priv, int port)
 {
-	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 	u32 reg, val;
 
-	/* Enable the port memories */
-	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
-	reg &= ~P_TXQ_PSM_VDD(port);
-	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
-
-	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
-	reg = core_readl(priv, CORE_IMP_CTL);
-	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
-	reg &= ~(RX_DIS | TX_DIS);
-	core_writel(priv, reg, CORE_IMP_CTL);
-
-	/* Enable forwarding */
-	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);
-
-	/* Enable IMP port in dumb mode */
-	reg = core_readl(priv, CORE_SWITCH_CTRL);
-	reg |= MII_DUMB_FWDG_EN;
-	core_writel(priv, reg, CORE_SWITCH_CTRL);
-
 	/* Resolve which bit controls the Broadcom tag */
 	switch (port) {
 	case 8:
@@ -119,11 +99,43 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
 	reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS);
 	reg &= ~(1 << port);
 	core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS);
 }
 
+static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
+{
+	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+	u32 reg, offset;
+
+	if (priv->type == BCM7445_DEVICE_ID)
+		offset = CORE_STS_OVERRIDE_IMP;
+	else
+		offset = CORE_STS_OVERRIDE_IMP2;
+
+	/* Enable the port memories */
+	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
+	reg &= ~P_TXQ_PSM_VDD(port);
+	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
+
+	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
+	reg = core_readl(priv, CORE_IMP_CTL);
+	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
+	reg &= ~(RX_DIS | TX_DIS);
+	core_writel(priv, reg, CORE_IMP_CTL);
+
+	/* Enable forwarding */
+	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);
+
+	/* Enable IMP port in dumb mode */
+	reg = core_readl(priv, CORE_SWITCH_CTRL);
+	reg |= MII_DUMB_FWDG_EN;
+	core_writel(priv, reg, CORE_SWITCH_CTRL);
+
+	bcm_sf2_brcm_hdr_setup(priv, port);
+
 	/* Force link status for IMP port */
-	reg = core_readl(priv, CORE_STS_OVERRIDE_IMP);
+	reg = core_readl(priv, offset);
 	reg |= (MII_SW_OR | LINK_STS);
-	core_writel(priv, reg, CORE_STS_OVERRIDE_IMP);
+	core_writel(priv, reg, offset);
 }
 
 static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
@@ -217,6 +229,7 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
 {
 	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 	s8 cpu_port = ds->dst[ds->index].cpu_port;
+	unsigned int i;
 	u32 reg;
 
 	/* Clear the memory power down */
@@ -224,6 +237,18 @@
 	reg &= ~P_TXQ_PSM_VDD(port);
 	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
 
+	/* Enable Broadcom tags for that port if requested */
+	if (priv->brcm_tag_mask & BIT(port))
+		bcm_sf2_brcm_hdr_setup(priv, port);
+
+	/* Configure Traffic Class to QoS mapping, allow each priority to map
+	 * to a different queue number
+	 */
+	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
+	for (i = 0; i < 8; i++)
+		reg |= i << (PRT_TO_QID_SHIFT * i);
+	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));
+
 	/* Clear the Rx and Tx disable bits and set to no spanning tree */
 	core_writel(priv, 0, CORE_G_PCTL_PORT(port));
@@ -503,6 +528,9 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
 
 		if (mode == PHY_INTERFACE_MODE_MOCA)
 			priv->moca_port = port_num;
+
+		if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
+			priv->brcm_tag_mask |= 1 << port_num;
 	}
 }
 
@@ -591,7 +619,12 @@ static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
 	struct ethtool_eee *p = &priv->port_sts[port].eee;
 	u32 id_mode_dis = 0, port_mode;
 	const char *str = NULL;
-	u32 reg;
+	u32 reg, offset;
+
+	if (priv->type == BCM7445_DEVICE_ID)
+		offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
+	else
+		offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
 
 	switch (phydev->interface) {
 	case PHY_INTERFACE_MODE_RGMII:
@@ -662,7 +695,7 @@ force_link:
 	if (phydev->duplex == DUPLEX_FULL)
 		reg |= DUPLX_MODE;
 
-	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
+	core_writel(priv, reg, offset);
 
 	if (!phydev->is_pseudo_fixed_link)
 		p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
@@ -672,9 +705,14 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
 					 struct fixed_phy_status *status)
 {
 	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
-	u32 duplex, pause;
+	u32 duplex, pause, offset;
 	u32 reg;
 
+	if (priv->type == BCM7445_DEVICE_ID)
+		offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
+	else
+		offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
+
 	duplex = core_readl(priv, CORE_DUPSTS);
 	pause = core_readl(priv, CORE_PAUSESTS);
 
@@ -703,13 +741,13 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
 		status->duplex = !!(duplex & (1 << port));
 	}
 
-	reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port));
+	reg = core_readl(priv, offset);
 	reg |= SW_OVERRIDE;
 	if (status->link)
 		reg |= LINK_STS;
 	else
 		reg &= ~LINK_STS;
-	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
+	core_writel(priv, reg, offset);
 
 	if ((pause & (1 << port)) &&
 	    (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
@@ -977,10 +1015,110 @@ static struct b53_io_ops bcm_sf2_io_ops = {
 	.write64 = bcm_sf2_core_write64,
 };
 
+static const struct dsa_switch_ops bcm_sf2_ops = {
+	.get_tag_protocol	= bcm_sf2_sw_get_tag_protocol,
+	.setup			= bcm_sf2_sw_setup,
+	.get_strings		= b53_get_strings,
+	.get_ethtool_stats	= b53_get_ethtool_stats,
+	.get_sset_count		= b53_get_sset_count,
+	.get_phy_flags		= bcm_sf2_sw_get_phy_flags,
+	.adjust_link		= bcm_sf2_sw_adjust_link,
+	.fixed_link_update	= bcm_sf2_sw_fixed_link_update,
+	.suspend		= bcm_sf2_sw_suspend,
+	.resume			= bcm_sf2_sw_resume,
+	.get_wol		= bcm_sf2_sw_get_wol,
+	.set_wol		= bcm_sf2_sw_set_wol,
+	.port_enable		= bcm_sf2_port_setup,
+	.port_disable		= bcm_sf2_port_disable,
+	.get_eee		= bcm_sf2_sw_get_eee,
+	.set_eee		= bcm_sf2_sw_set_eee,
+	.port_bridge_join	= b53_br_join,
+	.port_bridge_leave	= b53_br_leave,
+	.port_stp_state_set	= b53_br_set_stp_state,
+	.port_fast_age		= b53_br_fast_age,
+	.port_vlan_filtering	= b53_vlan_filtering,
+	.port_vlan_prepare	= b53_vlan_prepare,
+	.port_vlan_add		= b53_vlan_add,
+	.port_vlan_del		= b53_vlan_del,
+	.port_vlan_dump		= b53_vlan_dump,
+	.port_fdb_prepare	= b53_fdb_prepare,
+	.port_fdb_dump		= b53_fdb_dump,
+	.port_fdb_add		= b53_fdb_add,
+	.port_fdb_del		= b53_fdb_del,
+	.get_rxnfc		= bcm_sf2_get_rxnfc,
+	.set_rxnfc		= bcm_sf2_set_rxnfc,
+	.port_mirror_add	= b53_mirror_add,
+	.port_mirror_del	= b53_mirror_del,
+};
+
+struct bcm_sf2_of_data {
+	u32 type;
+	const u16 *reg_offsets;
+	unsigned int core_reg_align;
+};
+
+/* Register offsets for the SWITCH_REG_* block */
+static const u16 bcm_sf2_7445_reg_offsets[] = {
+	[REG_SWITCH_CNTRL]	= 0x00,
+	[REG_SWITCH_STATUS]	= 0x04,
+	[REG_DIR_DATA_WRITE]	= 0x08,
+	[REG_DIR_DATA_READ]	= 0x0C,
+	[REG_SWITCH_REVISION]	= 0x18,
+	[REG_PHY_REVISION]	= 0x1C,
+	[REG_SPHY_CNTRL]	= 0x2C,
+	[REG_RGMII_0_CNTRL]	= 0x34,
+	[REG_RGMII_1_CNTRL]	= 0x40,
+	[REG_RGMII_2_CNTRL]	= 0x4c,
+	[REG_LED_0_CNTRL]	= 0x90,
+	[REG_LED_1_CNTRL]	= 0x94,
+	[REG_LED_2_CNTRL]	= 0x98,
+};
+
+static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
+	.type		= BCM7445_DEVICE_ID,
+	.core_reg_align	= 0,
+	.reg_offsets	= bcm_sf2_7445_reg_offsets,
+};
+
+static const u16 bcm_sf2_7278_reg_offsets[] = {
+	[REG_SWITCH_CNTRL]	= 0x00,
+	[REG_SWITCH_STATUS]	= 0x04,
+	[REG_DIR_DATA_WRITE]	= 0x08,
+	[REG_DIR_DATA_READ]	= 0x0c,
+	[REG_SWITCH_REVISION]	= 0x10,
+	[REG_PHY_REVISION]	= 0x14,
+	[REG_SPHY_CNTRL]	= 0x24,
+	[REG_RGMII_0_CNTRL]	= 0xe0,
+	[REG_RGMII_1_CNTRL]	= 0xec,
+	[REG_RGMII_2_CNTRL]	= 0xf8,
+	[REG_LED_0_CNTRL]	= 0x40,
+	[REG_LED_1_CNTRL]	= 0x4c,
+	[REG_LED_2_CNTRL]	= 0x58,
+};
+
+static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
+	.type		= BCM7278_DEVICE_ID,
+	.core_reg_align	= 1,
+	.reg_offsets	= bcm_sf2_7278_reg_offsets,
+};
+
+static const struct of_device_id bcm_sf2_of_match[] = {
+	{ .compatible = "brcm,bcm7445-switch-v4.0",
+	  .data = &bcm_sf2_7445_data
+	},
+	{ .compatible = "brcm,bcm7278-switch-v4.0",
+	  .data = &bcm_sf2_7278_data
+	},
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
+
 static int bcm_sf2_sw_probe(struct platform_device *pdev)
 {
 	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
 	struct device_node *dn = pdev->dev.of_node;
+	const struct of_device_id *of_id = NULL;
+	const struct bcm_sf2_of_data *data;
 	struct b53_platform_data *pdata;
-	struct dsa_switch_ops *ops;
 	struct bcm_sf2_priv *priv;
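The of_data blocks above show the whole indirection in miniature: each compatible string binds a chip type, a SWITCH_REG offset table indexed by symbolic register names, and a core-register alignment shift. A minimal standalone sketch of the same lookup pattern; the enum entries and offsets here are illustrative stand-ins, not the driver's actual values:

/* Standalone model of the SWITCH_REG offset indirection: one symbolic
 * register index resolves to different physical offsets per SoC.
 */
#include <stdint.h>
#include <stdio.h>

enum sf2_reg { REG_SWITCH_CNTRL, REG_SWITCH_REVISION, REG_MAX };

static const uint16_t soc_a_offsets[REG_MAX] = {
	[REG_SWITCH_CNTRL]	= 0x00,
	[REG_SWITCH_REVISION]	= 0x18,
};

static const uint16_t soc_b_offsets[REG_MAX] = {
	[REG_SWITCH_CNTRL]	= 0x00,
	[REG_SWITCH_REVISION]	= 0x10,
};

struct priv { const uint16_t *reg_offsets; };

/* Mirrors reg_readl()/reg_writel(): resolve before touching hardware */
static uint16_t resolve(const struct priv *p, enum sf2_reg r)
{
	return p->reg_offsets[r];
}

int main(void)
{
	struct priv a = { soc_a_offsets }, b = { soc_b_offsets };

	printf("REVISION: SoC A @ 0x%02x, SoC B @ 0x%02x\n",
	       resolve(&a, REG_SWITCH_REVISION),
	       resolve(&b, REG_SWITCH_REVISION));
	return 0;
}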
@@ -1008,42 +1146,38 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
 	if (!pdata)
 		return -ENOMEM;
 
+	of_id = of_match_node(bcm_sf2_of_match, dn);
+	if (!of_id || !of_id->data)
+		return -EINVAL;
+
+	data = of_id->data;
+
+	/* Set SWITCH_REG register offsets and SWITCH_CORE align factor */
+	priv->type = data->type;
+	priv->reg_offsets = data->reg_offsets;
+	priv->core_reg_align = data->core_reg_align;
+
 	/* Auto-detection using standard registers will not work, so
 	 * provide an indication of what kind of device we are for
 	 * b53_common to work with
 	 */
-	pdata->chip_id = BCM7445_DEVICE_ID;
+	pdata->chip_id = priv->type;
 	dev->pdata = pdata;
 
 	priv->dev = dev;
 	ds = dev->ds;
 
-	/* Override the parts that are non-standard wrt. normal b53 devices */
-	memcpy(ops, ds->ops, sizeof(*ops));
-	ds->ops = ops;
-	ds->ops->get_tag_protocol = bcm_sf2_sw_get_tag_protocol;
-	ds->ops->setup = bcm_sf2_sw_setup;
-	ds->ops->get_phy_flags = bcm_sf2_sw_get_phy_flags;
-	ds->ops->adjust_link = bcm_sf2_sw_adjust_link;
-	ds->ops->fixed_link_update = bcm_sf2_sw_fixed_link_update;
-	ds->ops->suspend = bcm_sf2_sw_suspend;
-	ds->ops->resume = bcm_sf2_sw_resume;
-	ds->ops->get_wol = bcm_sf2_sw_get_wol;
-	ds->ops->set_wol = bcm_sf2_sw_set_wol;
-	ds->ops->port_enable = bcm_sf2_port_setup;
-	ds->ops->port_disable = bcm_sf2_port_disable;
-	ds->ops->get_eee = bcm_sf2_sw_get_eee;
-	ds->ops->set_eee = bcm_sf2_sw_set_eee;
-
-	/* Avoid having DSA free our slave MDIO bus (checking for
-	 * ds->slave_mii_bus and ds->ops->phy_read being non-NULL)
-	 */
-	ds->ops->phy_read = NULL;
+	ds->ops = &bcm_sf2_ops;
 
 	dev_set_drvdata(&pdev->dev, priv);
 
 	spin_lock_init(&priv->indir_lock);
 	mutex_init(&priv->stats_mutex);
+	mutex_init(&priv->cfp.lock);
+
+	/* CFP rule #0 cannot be used for specific classifications, flag it as
+	 * permanently used
+	 */
+	set_bit(0, priv->cfp.used);
 
 	bcm_sf2_identify_ports(priv, dn->child);
@@ -1073,6 +1207,12 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
 		return ret;
 	}
 
+	ret = bcm_sf2_cfp_rst(priv);
+	if (ret) {
+		pr_err("failed to reset CFP\n");
+		goto out_mdio;
+	}
+
 	/* Disable all interrupts and request them */
 	bcm_sf2_intr_disable(priv);
@@ -1179,11 +1319,6 @@ static int bcm_sf2_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
 			 bcm_sf2_suspend, bcm_sf2_resume);
 
-static const struct of_device_id bcm_sf2_of_match[] = {
-	{ .compatible = "brcm,bcm7445-switch-v4.0" },
-	{ /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
-
 static struct platform_driver bcm_sf2_driver = {
 	.probe = bcm_sf2_sw_probe,
@@ -52,6 +52,13 @@ struct bcm_sf2_port_status {
 	struct ethtool_eee eee;
 };
 
+struct bcm_sf2_cfp_priv {
+	/* Mutex protecting concurrent accesses to the CFP registers */
+	struct mutex lock;
+	DECLARE_BITMAP(used, CFP_NUM_RULES);
+	unsigned int rules_cnt;
+};
+
 struct bcm_sf2_priv {
 	/* Base registers, keep those in order with BCM_SF2_REGS_NAME */
 	void __iomem			*core;
@@ -61,6 +68,11 @@ struct bcm_sf2_priv {
 	void __iomem			*fcb;
 	void __iomem			*acb;
 
+	/* Register offsets indirection tables */
+	u32				type;
+	const u16			*reg_offsets;
+	unsigned int			core_reg_align;
+
 	/* spinlock protecting access to the indirect registers */
 	spinlock_t			indir_lock;
@@ -95,6 +107,12 @@ struct bcm_sf2_priv {
 	struct device_node		*master_mii_dn;
 	struct mii_bus			*slave_mii_bus;
 	struct mii_bus			*master_mii_bus;
 
+	/* Bitmask of ports needing BRCM tags */
+	unsigned int			brcm_tag_mask;
+
+	/* CFP rules context */
+	struct bcm_sf2_cfp_priv		cfp;
 };
 
 static inline struct bcm_sf2_priv *bcm_sf2_to_priv(struct dsa_switch *ds)
@@ -104,6 +122,11 @@ static inline struct bcm_sf2_priv *bcm_sf2_to_priv(struct dsa_switch *ds)
 	return dev->priv;
 }
 
+static inline u32 bcm_sf2_mangle_addr(struct bcm_sf2_priv *priv, u32 off)
+{
+	return off << priv->core_reg_align;
+}
+
 #define SF2_IO_MACRO(name) \
 static inline u32 name##_readl(struct bcm_sf2_priv *priv, u32 off) \
 { \
@@ -125,7 +148,7 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \
 { \
 	u32 indir, dir; \
 	spin_lock(&priv->indir_lock); \
-	dir = __raw_readl(priv->name + off); \
+	dir = name##_readl(priv, off); \
 	indir = reg_readl(priv, REG_DIR_DATA_READ); \
 	spin_unlock(&priv->indir_lock); \
 	return (u64)indir << 32 | dir; \
@@ -135,7 +158,7 @@ static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val, \
 { \
 	spin_lock(&priv->indir_lock); \
 	reg_writel(priv, upper_32_bits(val), REG_DIR_DATA_WRITE); \
-	__raw_writel(lower_32_bits(val), priv->name + off); \
+	name##_writel(priv, lower_32_bits(val), off); \
 	spin_unlock(&priv->indir_lock); \
 }
 
@@ -153,8 +176,28 @@ static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \
 	priv->irq##which##_mask |= (mask); \
 } \
 
-SF2_IO_MACRO(core);
-SF2_IO_MACRO(reg);
+static inline u32 core_readl(struct bcm_sf2_priv *priv, u32 off)
+{
+	u32 tmp = bcm_sf2_mangle_addr(priv, off);
+	return __raw_readl(priv->core + tmp);
+}
+
+static inline void core_writel(struct bcm_sf2_priv *priv, u32 val, u32 off)
+{
+	u32 tmp = bcm_sf2_mangle_addr(priv, off);
+	__raw_writel(val, priv->core + tmp);
+}
+
+static inline u32 reg_readl(struct bcm_sf2_priv *priv, u16 off)
+{
+	return __raw_readl(priv->reg + priv->reg_offsets[off]);
+}
+
+static inline void reg_writel(struct bcm_sf2_priv *priv, u32 val, u16 off)
+{
+	__raw_writel(val, priv->reg + priv->reg_offsets[off]);
+}
+
 SF2_IO64_MACRO(core);
 SF2_IO_MACRO(intrl2_0);
 SF2_IO_MACRO(intrl2_1);
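The open-coded core_readl()/core_writel() above route every SWITCH_CORE offset through bcm_sf2_mangle_addr(), so one driver binary can address both register layouts. A standalone sketch of that shift-based mangling, using the alignment values from the of_data tables earlier (0 on BCM7445, 1 on BCM7278); the sample offset is arbitrary:

/* Standalone model of bcm_sf2_mangle_addr(): SWITCH_CORE offsets are
 * shifted left by a per-SoC alignment factor before hitting hardware.
 */
#include <stdint.h>
#include <stdio.h>

struct soc { const char *name; unsigned int core_reg_align; };

static uint32_t mangle_addr(const struct soc *s, uint32_t off)
{
	return off << s->core_reg_align;
}

int main(void)
{
	const struct soc socs[] = {
		{ "BCM7445", 0 },
		{ "BCM7278", 1 },
	};
	uint32_t off = 0x0020;	/* hypothetical SWITCH_CORE offset */
	unsigned int i;

	for (i = 0; i < 2; i++)
		printf("%s: 0x%04x -> 0x%04x\n", socs[i].name, off,
		       mangle_addr(&socs[i], off));
	return 0;
}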
@@ -164,4 +207,11 @@ SF2_IO_MACRO(acb);
 SWITCH_INTR_L2(0);
 SWITCH_INTR_L2(1);
 
+/* RXNFC */
+int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
+		      struct ethtool_rxnfc *nfc, u32 *rule_locs);
+int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
+		      struct ethtool_rxnfc *nfc);
+int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv);
+
 #endif /* __BCM_SF2_H */
@@ -0,0 +1,613 @@
+/*
+ * Broadcom Starfighter 2 DSA switch CFP support
+ *
+ * Copyright (C) 2016, Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/list.h>
+#include <net/dsa.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/bitmap.h>
+
+#include "bcm_sf2.h"
+#include "bcm_sf2_regs.h"
+
+struct cfp_udf_layout {
+	u8 slices[UDF_NUM_SLICES];
+	u32 mask_value;
+};
+
+/* UDF slices layout for a TCPv4/UDPv4 specification */
+static const struct cfp_udf_layout udf_tcpip4_layout = {
+	.slices = {
+		/* End of L2, byte offset 12, src IP [0:15] */
+		CFG_UDF_EOL2 | 6,
+		/* End of L2, byte offset 14, src IP [16:31] */
+		CFG_UDF_EOL2 | 7,
+		/* End of L2, byte offset 16, dst IP [0:15] */
+		CFG_UDF_EOL2 | 8,
+		/* End of L2, byte offset 18, dst IP [16:31] */
+		CFG_UDF_EOL2 | 9,
+		/* End of L3, byte offset 0, src port */
+		CFG_UDF_EOL3 | 0,
+		/* End of L3, byte offset 2, dst port */
+		CFG_UDF_EOL3 | 1,
+		0, 0, 0
+	},
+	.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
+};
+
+static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
+{
+	unsigned int i, count = 0;
+
+	for (i = 0; i < UDF_NUM_SLICES; i++) {
+		if (layout[i] != 0)
+			count++;
+	}
+
+	return count;
+}
+
+static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
+				unsigned int slice_num,
+				const u8 *layout)
+{
+	u32 offset = CORE_UDF_0_A_0_8_PORT_0 + slice_num * UDF_SLICE_OFFSET;
+	unsigned int i;
+
+	for (i = 0; i < UDF_NUM_SLICES; i++)
+		core_writel(priv, layout[i], offset + i * 4);
+}
+
+static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
+{
+	unsigned int timeout = 1000;
+	u32 reg;
+
+	reg = core_readl(priv, CORE_CFP_ACC);
+	reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
+	reg |= OP_STR_DONE | op;
+	core_writel(priv, reg, CORE_CFP_ACC);
+
+	do {
+		reg = core_readl(priv, CORE_CFP_ACC);
+		if (!(reg & OP_STR_DONE))
+			break;
+
+		cpu_relax();
+	} while (timeout--);
+
+	if (!timeout)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
+					     unsigned int addr)
+{
+	u32 reg;
+
+	WARN_ON(addr >= CFP_NUM_RULES);
+
+	reg = core_readl(priv, CORE_CFP_ACC);
+	reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
+	reg |= addr << XCESS_ADDR_SHIFT;
+	core_writel(priv, reg, CORE_CFP_ACC);
+}
+
+static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
+{
+	/* Entry #0 is reserved */
+	return CFP_NUM_RULES - 1;
+}
+
+static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
+				struct ethtool_rx_flow_spec *fs)
+{
+	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+	struct ethtool_tcpip4_spec *v4_spec;
+	const struct cfp_udf_layout *layout;
+	unsigned int slice_num, rule_index;
+	unsigned int queue_num, port_num;
+	u8 ip_proto, ip_frag;
+	u8 num_udf;
+	u32 reg;
+	int ret;
+
+	/* Check for unsupported extensions */
+	if ((fs->flow_type & FLOW_EXT) &&
+	    (fs->m_ext.vlan_etype || fs->m_ext.data[1]))
+		return -EINVAL;
+
+	if (fs->location != RX_CLS_LOC_ANY &&
+	    test_bit(fs->location, priv->cfp.used))
+		return -EBUSY;
+
+	if (fs->location != RX_CLS_LOC_ANY &&
+	    fs->location > bcm_sf2_cfp_rule_size(priv))
+		return -EINVAL;
+
+	ip_frag = be32_to_cpu(fs->m_ext.data[0]);
+
+	/* We do not support discarding packets, check that the
+	 * destination port is enabled and that we are within the
+	 * number of ports supported by the switch
+	 */
+	port_num = fs->ring_cookie / 8;
+
+	if (fs->ring_cookie == RX_CLS_FLOW_DISC ||
+	    !(BIT(port_num) & ds->enabled_port_mask) ||
+	    port_num >= priv->hw_params.num_ports)
+		return -EINVAL;
+
+	switch (fs->flow_type & ~FLOW_EXT) {
+	case TCP_V4_FLOW:
+		ip_proto = IPPROTO_TCP;
+		v4_spec = &fs->h_u.tcp_ip4_spec;
+		break;
+	case UDP_V4_FLOW:
+		ip_proto = IPPROTO_UDP;
+		v4_spec = &fs->h_u.udp_ip4_spec;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* We only use one UDF slice for now */
+	slice_num = 1;
+	layout = &udf_tcpip4_layout;
+	num_udf = bcm_sf2_get_num_udf_slices(layout->slices);
+
+	/* Apply the UDF layout for this filter */
+	bcm_sf2_cfp_udf_set(priv, slice_num, layout->slices);
+
+	/* Apply to all packets received through this port */
+	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));
+
+	/* S-Tag status		[31:30]
+	 * C-Tag status		[29:28]
+	 * L2 framing		[27:26]
+	 * L3 framing		[25:24]
+	 * IP ToS		[23:16]
+	 * IP proto		[15:08]
+	 * IP Fragm		[7]
+	 * Non 1st frag		[6]
+	 * IP Authen		[5]
+	 * TTL range		[4:3]
+	 * PPPoE session	[2]
+	 * Reserved		[1]
+	 * UDF_Valid[8]		[0]
+	 */
+	core_writel(priv, v4_spec->tos << 16 | ip_proto << 8 | ip_frag << 7,
+		    CORE_CFP_DATA_PORT(6));
+
+	/* UDF_Valid[7:0]	[31:24]
+	 * S-Tag		[23:8]
+	 * C-Tag		[7:0]
+	 */
+	core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_DATA_PORT(5));
+
+	/* C-Tag		[31:24]
+	 * UDF_n_A8		[23:8]
+	 * UDF_n_A7		[7:0]
+	 */
+	core_writel(priv, 0, CORE_CFP_DATA_PORT(4));
+
+	/* UDF_n_A7		[31:24]
+	 * UDF_n_A6		[23:8]
+	 * UDF_n_A5		[7:0]
+	 */
+	core_writel(priv, be16_to_cpu(v4_spec->pdst) >> 8,
+		    CORE_CFP_DATA_PORT(3));
+
+	/* UDF_n_A5		[31:24]
+	 * UDF_n_A4		[23:8]
+	 * UDF_n_A3		[7:0]
+	 */
+	reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 |
+	      (u32)be16_to_cpu(v4_spec->psrc) << 8 |
+	      (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8;
+	core_writel(priv, reg, CORE_CFP_DATA_PORT(2));
+
+	/* UDF_n_A3		[31:24]
+	 * UDF_n_A2		[23:8]
+	 * UDF_n_A1		[7:0]
+	 */
+	reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 |
+	      (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 |
+	      (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8;
+	core_writel(priv, reg, CORE_CFP_DATA_PORT(1));
+
+	/* UDF_n_A1		[31:24]
+	 * UDF_n_A0		[23:8]
+	 * Reserved		[7:4]
+	 * Slice ID		[3:2]
+	 * Slice valid		[1:0]
+	 */
+	reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 |
+	      (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 |
+	      SLICE_NUM(slice_num) | SLICE_VALID;
+	core_writel(priv, reg, CORE_CFP_DATA_PORT(0));
+
+	/* Source port map match */
+	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));
+
+	/* Mask with the specific layout for IPv4 packets */
+	core_writel(priv, layout->mask_value, CORE_CFP_MASK_PORT(6));
+
+	/* Mask all but valid UDFs */
+	core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_MASK_PORT(5));
+
+	/* Mask all */
+	core_writel(priv, 0, CORE_CFP_MASK_PORT(4));
+
+	/* All other UDFs should be matched with the filter */
+	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(3));
+	core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(2));
+	core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(1));
+	core_writel(priv, 0xffffff0f, CORE_CFP_MASK_PORT(0));
+
+	/* Locate the first rule available */
+	if (fs->location == RX_CLS_LOC_ANY)
+		rule_index = find_first_zero_bit(priv->cfp.used,
+						 bcm_sf2_cfp_rule_size(priv));
+	else
+		rule_index = fs->location;
+
+	/* Insert into TCAM now */
+	bcm_sf2_cfp_rule_addr_set(priv, rule_index);
+
+	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
+	if (ret) {
+		pr_err("TCAM entry at addr %d failed\n", rule_index);
+		return ret;
+	}
+
+	/* Replace ARL derived destination with DST_MAP derived, define
+	 * which port and queue this should be forwarded to.
+	 *
+	 * We have a small oddity where Port 6 just does not have a
+	 * valid bit here (so we subtract by one).
+	 */
+	queue_num = fs->ring_cookie % 8;
+	if (port_num >= 7)
+		port_num -= 1;
+
+	reg = CHANGE_FWRD_MAP_IB_REP_ARL | BIT(port_num + DST_MAP_IB_SHIFT) |
+	      CHANGE_TC | queue_num << NEW_TC_SHIFT;
+
+	core_writel(priv, reg, CORE_ACT_POL_DATA0);
+
+	/* Set classification ID that needs to be put in Broadcom tag */
+	core_writel(priv, rule_index << CHAIN_ID_SHIFT,
+		    CORE_ACT_POL_DATA1);
+
+	core_writel(priv, 0, CORE_ACT_POL_DATA2);
+
+	/* Configure policer RAM now */
+	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
+	if (ret) {
+		pr_err("Policer entry at %d failed\n", rule_index);
+		return ret;
+	}
+
+	/* Disable the policer */
+	core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);
+
+	/* Now the rate meter */
+	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
+	if (ret) {
+		pr_err("Meter entry at %d failed\n", rule_index);
+		return ret;
+	}
+
+	/* Turn on CFP for this rule now */
+	reg = core_readl(priv, CORE_CFP_CTL_REG);
+	reg |= BIT(port);
+	core_writel(priv, reg, CORE_CFP_CTL_REG);
+
+	/* Flag the rule as being used and return it */
+	set_bit(rule_index, priv->cfp.used);
+	fs->location = rule_index;
+
+	return 0;
+}
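The ring_cookie handling in bcm_sf2_cfp_rule_set() packs the destination as port * 8 + queue, with the port-6 gap worked around before the DST_MAP bit is built. A standalone model of that arithmetic; the port and queue values are arbitrary examples:

/* Standalone model of the CFP ring_cookie encoding used above:
 * ring_cookie = dst_port * 8 + dst_queue.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ring_cookie = 2 * 8 + 5;	/* port 2, queue 5 */
	unsigned int port_num = ring_cookie / 8;
	unsigned int queue_num = ring_cookie % 8;

	printf("port %u, queue %u\n", port_num, queue_num);

	/* The action RAM has no valid bit for port 6, so ports at or
	 * above 7 are shifted down by one before building DST_MAP.
	 */
	if (port_num >= 7)
		port_num -= 1;
	printf("DST_MAP bit index: %u\n", port_num);
	return 0;
}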
+
+static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
+				u32 loc)
+{
+	int ret;
+	u32 reg;
+
+	/* Refuse deletion of unused rules, and the default reserved rule */
+	if (!test_bit(loc, priv->cfp.used) || loc == 0)
+		return -EINVAL;
+
+	/* Indicate which rule we want to read */
+	bcm_sf2_cfp_rule_addr_set(priv, loc);
+
+	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
+	if (ret)
+		return ret;
+
+	/* Clear its valid bits */
+	reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
+	reg &= ~SLICE_VALID;
+	core_writel(priv, reg, CORE_CFP_DATA_PORT(0));
+
+	/* Write back this entry into the TCAM now */
+	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
+	if (ret)
+		return ret;
+
+	clear_bit(loc, priv->cfp.used);
+
+	return 0;
+}
+
+static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
+{
+	unsigned int i;
+
+	for (i = 0; i < sizeof(flow->m_u); i++)
+		flow->m_u.hdata[i] ^= 0xff;
+
+	flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
+	flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
+	flow->m_ext.data[0] ^= cpu_to_be32(~0);
+	flow->m_ext.data[1] ^= cpu_to_be32(~0);
+}
+
+static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
+				struct ethtool_rxnfc *nfc, bool search)
+{
+	struct ethtool_tcpip4_spec *v4_spec;
+	unsigned int queue_num;
+	u16 src_dst_port;
+	u32 reg, ipv4;
+	int ret;
+
+	if (!search) {
+		bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);
+
+		ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);
+		if (ret)
+			return ret;
+
+		reg = core_readl(priv, CORE_ACT_POL_DATA0);
+
+		ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
+		if (ret)
+			return ret;
+	} else {
+		reg = core_readl(priv, CORE_ACT_POL_DATA0);
+	}
+
+	/* Extract the destination port */
+	nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
+				  DST_MAP_IB_MASK) - 1;
+
+	/* There is no Port 6, so we compensate for that here */
+	if (nfc->fs.ring_cookie >= 6)
+		nfc->fs.ring_cookie++;
+	nfc->fs.ring_cookie *= 8;
+
+	/* Extract the destination queue */
+	queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
+	nfc->fs.ring_cookie += queue_num;
+
+	/* Extract the IP protocol */
+	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
+	switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
+	case IPPROTO_TCP:
+		nfc->fs.flow_type = TCP_V4_FLOW;
+		v4_spec = &nfc->fs.h_u.tcp_ip4_spec;
+		break;
+	case IPPROTO_UDP:
+		nfc->fs.flow_type = UDP_V4_FLOW;
+		v4_spec = &nfc->fs.h_u.udp_ip4_spec;
+		break;
+	default:
+		/* Clear to exit the search process */
+		if (search)
+			core_readl(priv, CORE_CFP_DATA_PORT(7));
+		return -EINVAL;
+	}
+
+	v4_spec->tos = (reg >> 16) & IPPROTO_MASK;
+	nfc->fs.m_ext.data[0] = cpu_to_be32((reg >> 7) & 1);
+
+	reg = core_readl(priv, CORE_CFP_DATA_PORT(3));
+	/* src port [15:8] */
+	src_dst_port = reg << 8;
+
+	reg = core_readl(priv, CORE_CFP_DATA_PORT(2));
+	/* src port [7:0] */
+	src_dst_port |= (reg >> 24);
+
+	v4_spec->pdst = cpu_to_be16(src_dst_port);
+	nfc->fs.m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+	v4_spec->psrc = cpu_to_be16((u16)(reg >> 8));
+	nfc->fs.m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
+
+	/* IPv4 dst [15:8] */
+	ipv4 = (reg & 0xff) << 8;
+	reg = core_readl(priv, CORE_CFP_DATA_PORT(1));
+	/* IPv4 dst [31:16] */
+	ipv4 |= ((reg >> 8) & 0xffff) << 16;
+	/* IPv4 dst [7:0] */
+	ipv4 |= (reg >> 24) & 0xff;
+	v4_spec->ip4dst = cpu_to_be32(ipv4);
+	nfc->fs.m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
+
+	/* IPv4 src [15:8] */
+	ipv4 = (reg & 0xff) << 8;
+	reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
+
+	if (!(reg & SLICE_VALID))
+		return -EINVAL;
+
+	/* IPv4 src [7:0] */
+	ipv4 |= (reg >> 24) & 0xff;
+	/* IPv4 src [31:16] */
+	ipv4 |= ((reg >> 8) & 0xffff) << 16;
+	v4_spec->ip4src = cpu_to_be32(ipv4);
+	nfc->fs.m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
+
+	/* Read last to avoid next entry clobbering the results during search
+	 * operations
+	 */
+	reg = core_readl(priv, CORE_CFP_DATA_PORT(7));
+	if (!(reg & 1 << port))
+		return -EINVAL;
+
+	bcm_sf2_invert_masks(&nfc->fs);
+
+	/* Put the TCAM size here */
+	nfc->data = bcm_sf2_cfp_rule_size(priv);
+
+	return 0;
+}
+
+/* We implement the search doing a TCAM search operation */
+static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
+				    int port, struct ethtool_rxnfc *nfc,
+				    u32 *rule_locs)
+{
+	unsigned int index = 1, rules_cnt = 0;
+	int ret;
+	u32 reg;
+
+	/* Do not poll on OP_STR_DONE to be self-clearing for search
+	 * operations, we cannot use bcm_sf2_cfp_op here because it completes
+	 * on clearing OP_STR_DONE which won't clear until the entire search
+	 * operation is over.
+	 */
+	reg = core_readl(priv, CORE_CFP_ACC);
+	reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
+	reg |= index << XCESS_ADDR_SHIFT;
+	reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
+	reg |= OP_SEL_SEARCH | TCAM_SEL | OP_STR_DONE;
+	core_writel(priv, reg, CORE_CFP_ACC);
+
+	do {
+		/* Wait for results to be ready */
+		reg = core_readl(priv, CORE_CFP_ACC);
+
+		/* Extract the address we are searching */
+		index = reg >> XCESS_ADDR_SHIFT;
+		index &= XCESS_ADDR_MASK;
+
+		/* We have a valid search result, so flag it accordingly */
+		if (reg & SEARCH_STS) {
+			ret = bcm_sf2_cfp_rule_get(priv, port, nfc, true);
+			if (ret)
+				continue;
+
+			rule_locs[rules_cnt] = index;
+			rules_cnt++;
+		}
+
+		/* Search is over break out */
+		if (!(reg & OP_STR_DONE))
+			break;
+
+	} while (index < CFP_NUM_RULES);
+
+	/* Put the TCAM size here */
+	nfc->data = bcm_sf2_cfp_rule_size(priv);
+	nfc->rule_cnt = rules_cnt;
+
+	return 0;
+}
+
+int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
+		      struct ethtool_rxnfc *nfc, u32 *rule_locs)
+{
+	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+	int ret = 0;
+
+	mutex_lock(&priv->cfp.lock);
+
+	switch (nfc->cmd) {
+	case ETHTOOL_GRXCLSRLCNT:
+		/* Subtract the default, unusable rule */
+		nfc->rule_cnt = bitmap_weight(priv->cfp.used,
+					      CFP_NUM_RULES) - 1;
+		/* We support specifying rule locations */
+		nfc->data |= RX_CLS_LOC_SPECIAL;
+		break;
+	case ETHTOOL_GRXCLSRULE:
+		ret = bcm_sf2_cfp_rule_get(priv, port, nfc, false);
+		break;
+	case ETHTOOL_GRXCLSRLALL:
+		ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
+		break;
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	mutex_unlock(&priv->cfp.lock);
+
+	return ret;
+}
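These handlers sit behind the standard ethtool rxnfc interface, so a hedged userspace sketch of how a rule count query reaches bcm_sf2_get_rxnfc() may help; the interface name "lan1" is a placeholder:

/* Userspace sketch: query the CFP rule count via SIOCETHTOOL, which
 * lands in bcm_sf2_get_rxnfc() as ETHTOOL_GRXCLSRLCNT.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "lan1", IFNAMSIZ - 1);	/* placeholder */

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_GRXCLSRLCNT;
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("SIOCETHTOOL");
	else
		printf("%u classification rules installed\n", nfc.rule_cnt);

	close(fd);
	return 0;
}

The same paths are exercised from the shell by ethtool -n (read) and ethtool -N (insert/delete) on the switch port's net device.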
+
+int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
+		      struct ethtool_rxnfc *nfc)
+{
+	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+	int ret = 0;
+
+	mutex_lock(&priv->cfp.lock);
+
+	switch (nfc->cmd) {
+	case ETHTOOL_SRXCLSRLINS:
+		ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
+		break;
+
+	case ETHTOOL_SRXCLSRLDEL:
+		ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
+		break;
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	mutex_unlock(&priv->cfp.lock);
+
+	return ret;
+}
+
+int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
+{
+	unsigned int timeout = 1000;
+	u32 reg;
+
+	reg = core_readl(priv, CORE_CFP_ACC);
+	reg |= TCAM_RESET;
+	core_writel(priv, reg, CORE_CFP_ACC);
+
+	do {
+		reg = core_readl(priv, CORE_CFP_ACC);
+		if (!(reg & TCAM_RESET))
+			break;
+
+		cpu_relax();
+	} while (timeout--);
+
+	if (!timeout)
+		return -ETIMEDOUT;
+
+	return 0;
+}
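bcm_sf2_cfp_op() and bcm_sf2_cfp_rst() share the same idiom: set a self-clearing bit, then spin until the hardware clears it or the iteration budget runs out. A standalone sketch of that idiom, written as a slightly tightened variant that checks loop exhaustion explicitly; the "hardware" bit here is simulated:

/* Standalone model of the bounded-poll idiom used by the CFP code.
 * The fake register stands in for CORE_CFP_ACC.
 */
#include <stdint.h>
#include <stdio.h>

#define OP_STR_DONE	(1U << 0)

static uint32_t fake_reg;
static unsigned int reads;

/* Simulated self-clearing bit: clears on the fourth read */
static uint32_t read_reg(void)
{
	if (++reads == 4)
		fake_reg &= ~OP_STR_DONE;
	return fake_reg;
}

static int poll_done(void)
{
	unsigned int timeout;

	fake_reg |= OP_STR_DONE;	/* kick the operation */
	for (timeout = 1000; timeout; timeout--) {
		if (!(read_reg() & OP_STR_DONE))
			return 0;	/* operation completed */
	}
	return -1;			/* stands in for -ETIMEDOUT */
}

int main(void)
{
	printf("poll result: %d after %u reads\n", poll_done(), reads);
	return 0;
}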